/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)tcp_usrreq.c	8.2 (Berkeley) 1/3/94
 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.9 2001/08/22 00:59:12 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/syslog.h>

#include <net/route.h>
#include <net/ntstat.h>
#include <net/content_filter.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cc.h>
#include <mach/sdt.h>
#include <netinet/tcp_debug.h>
#include <netinet/mptcp_var.h>
#include <netinet6/ipsec.h>
#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */
errno_t tcp_fill_info_for_info_tuple(struct info_tuple *, struct tcp_info *);
int tcp_sysctl_info(struct sysctl_oid *, void *, int, struct sysctl_req *);
static void tcp_connection_fill_info(struct tcpcb *tp,
    struct tcp_connection_info *tci);

/*
 * TCP protocol interface to socket abstraction.
 */
extern char *tcpstates[];	/* XXX ??? */

static int	tcp_attach(struct socket *, struct proc *);
static int	tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *);
static int	tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *);
static int	tcp6_usr_connect(struct socket *, struct sockaddr *,
    struct proc *);
static struct tcpcb *tcp_disconnect(struct tcpcb *);
static struct tcpcb *tcp_usrclosed(struct tcpcb *);
extern void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
#if TCPDEBUG
#define TCPDEBUG0       int ostate = 0
#define TCPDEBUG1()     ostate = tp ? tp->t_state : 0
#define TCPDEBUG2(req)  if (tp && (so->so_options & SO_DEBUG)) \
	                        tcp_trace(TA_USER, ostate, tp, 0, 0, req)
#else
#define TCPDEBUG0
#define TCPDEBUG1()
#define TCPDEBUG2(req)
#endif
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, info,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_KERN,
    0, 0, tcp_sysctl_info, "S", "TCP info per tuple");
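/*
 * Illustrative usage sketch (not part of the original source): userland can
 * exercise the "net.inet.tcp.info" node declared above by passing a
 * struct info_tuple as the new value and reading back a struct tcp_info,
 * roughly:
 *
 *	struct info_tuple itpl;   // proto + local/remote addresses filled in
 *	struct tcp_info ti;
 *	size_t len = sizeof(ti);
 *	sysctlbyname("net.inet.tcp.info", &ti, &len, &itpl, sizeof(itpl));
 *
 * The tuple layout is whatever struct info_tuple defines alongside this code;
 * the handler below (tcp_sysctl_info) does the lookup and the copy-out.
 */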
/*
 * TCP attaches to socket via pru_attach(), reserving space,
 * and an internet control block.
 *
 * Returns:	0			Success
 *	tcp_attach:???			[IPSEC specific]
 */
static int
tcp_usr_attach(struct socket *so, __unused int proto, struct proc *p)
{
	int error;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = 0;
	TCPDEBUG0;

	TCPDEBUG1();
	if (inp) {
		error = EISCONN;
		goto out;
	}

	error = tcp_attach(so, p);
	if (error) {
		goto out;
	}

	if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		so->so_linger = TCP_LINGERTIME * hz;
	}
	tp = sototcpcb(so);
out:
	TCPDEBUG2(PRU_ATTACH);
	return error;
}
/*
 * pru_detach() detaches the TCP protocol from the socket.
 * If the protocol state is non-embryonic, then can't
 * do this directly: have to initiate a pru_disconnect(),
 * which may finish later; embryonic TCB's can just
 * be discarded here.
 */
static int
tcp_usr_detach(struct socket *so)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	TCPDEBUG0;

	if (inp == 0 || (inp->inp_state == INPCB_STATE_DEAD)) {
		return EINVAL;	/* XXX */
	}
	socket_lock_assert_owned(so);
	tp = intotcpcb(inp);
	/* In case we got disconnected from the peer */
	if (tp == NULL) {
		goto out;
	}
	TCPDEBUG1();

	calculate_tcp_clock();

	tp = tcp_disconnect(tp);
out:
	TCPDEBUG2(PRU_DETACH);
	return error;
}
#if NECP
#define COMMON_START()  TCPDEBUG0;                                      \
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)         \
	        return (EINVAL);                                        \
	if (necp_socket_should_use_flow_divert(inp))                    \
	        return (EPROTOTYPE);                                    \
	tp = intotcpcb(inp);                                            \
	TCPDEBUG1();                                                    \
	calculate_tcp_clock()
#else /* !NECP */
#define COMMON_START()  TCPDEBUG0;                                      \
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)         \
	        return (EINVAL);                                        \
	tp = intotcpcb(inp);                                            \
	TCPDEBUG1();                                                    \
	calculate_tcp_clock()
#endif /* !NECP */

#define COMMON_END(req) out: TCPDEBUG2(req); return error; goto out
/*
 * Give the socket an address.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument [COMMON_START]
 *		EAFNOSUPPORT		Address family not supported
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 */
static int
tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct sockaddr_in *sinp;

	COMMON_START();

	if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
		error = EAFNOSUPPORT;
		goto out;
	}

	/*
	 * Must check for multicast addresses and disallow binding
	 * to them.
	 */
	sinp = (struct sockaddr_in *)(void *)nam;
	if (sinp->sin_family == AF_INET &&
	    IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
		error = EAFNOSUPPORT;
		goto out;
	}
	error = in_pcbbind(inp, nam, p);
	if (error) {
		goto out;
	}

#if NECP
	/* Update NECP client with bind result if not in middle of connect */
	if ((inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
	    !uuid_is_null(inp->necp_client_uuid)) {
		socket_unlock(so, 0);
		necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
		socket_lock(so, 0);
	}
#endif /* NECP */

	COMMON_END(PRU_BIND);
}
static int
tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct sockaddr_in6 *sin6p;

	COMMON_START();

	if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
		error = EAFNOSUPPORT;
		goto out;
	}

	/*
	 * Must check for multicast addresses and disallow binding
	 * to them.
	 */
	sin6p = (struct sockaddr_in6 *)(void *)nam;
	if (sin6p->sin6_family == AF_INET6 &&
	    IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
		error = EAFNOSUPPORT;
		goto out;
	}

	inp->inp_vflag &= ~INP_IPV4;
	inp->inp_vflag |= INP_IPV6;
	if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) {
			inp->inp_vflag |= INP_IPV4;
		} else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
			struct sockaddr_in sin;

			in6_sin6_2_sin(&sin, sin6p);
			inp->inp_vflag |= INP_IPV4;
			inp->inp_vflag &= ~INP_IPV6;
			error = in_pcbbind(inp, (struct sockaddr *)&sin, p);
			goto out;
		}
	}
	error = in6_pcbbind(inp, nam, p);
	COMMON_END(PRU_BIND);
}
/*
 * Prepare to accept connections.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 */
static int
tcp_usr_listen(struct socket *so, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	COMMON_START();
	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, p);
	}
	if (error == 0) {
		tp->t_state = TCPS_LISTEN;
	}
	COMMON_END(PRU_LISTEN);
}
static int
tcp6_usr_listen(struct socket *so, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	COMMON_START();
	if (inp->inp_lport == 0) {
		inp->inp_vflag &= ~INP_IPV4;
		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
			inp->inp_vflag |= INP_IPV4;
		}
		error = in6_pcbbind(inp, NULL, p);
	}
	if (error == 0) {
		tp->t_state = TCPS_LISTEN;
	}
	COMMON_END(PRU_LISTEN);
}
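/*
 * Finish the common tail of an active connect.  For TFO/pre-connect data
 * sockets only enough state is set up to queue data (tcp_output is deferred
 * until the application writes); otherwise the initial segment is sent here.
 * In either case the NECP client, if any, is told about the connected
 * five-tuple.
 */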
static int
tcp_connect_complete(struct socket *so)
{
	struct tcpcb *tp = sototcpcb(so);
	struct inpcb *inp = sotoinpcb(so);
	int error = 0;

	/* TFO delays the tcp_output until later, when the app calls write() */
	if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
		if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, NULL, NULL)) {
			return EHOSTUNREACH;
		}

		/* Initialize enough state so that we can actually send data */
		tcp_mss(tp, -1, IFSCOPE_NONE);
		tp->snd_wnd = tp->t_maxseg;
		tp->max_sndwnd = tp->snd_wnd;
	} else {
		error = tcp_output(tp);
	}

#if NECP
	/* Update NECP client with connected five-tuple */
	if (error == 0 && !uuid_is_null(inp->necp_client_uuid)) {
		socket_unlock(so, 0);
		necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp);
		socket_lock(so, 0);
	}
#endif /* NECP */

	return error;
}
/*
 * Initiate connection to peer.
 * Create a template for use in transmissions on this connection.
 * Enter SYN_SENT state, and mark socket as connecting.
 * Start keep-alive timer, and seed output sequence space.
 * Send initial segment on connection.
 */
static int
tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct sockaddr_in *sinp;

	TCPDEBUG0;
	if (inp == NULL) {
		return EINVAL;
	} else if (inp->inp_state == INPCB_STATE_DEAD) {
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			return error;
		} else {
			return EINVAL;
		}
	}
#if NECP
#if FLOW_DIVERT
	else if (necp_socket_should_use_flow_divert(inp)) {
		uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp);
		if (fd_ctl_unit > 0) {
			error = flow_divert_pcb_init(so, fd_ctl_unit);
			if (error == 0) {
				error = flow_divert_connect_out(so, nam, p);
			}
		} else {
			error = ENETDOWN;
		}
		return error;
	}
#endif /* FLOW_DIVERT */
#endif /* NECP */

#if CONTENT_FILTER
	error = cfil_sock_attach(so);
	if (error != 0) {
		return error;
	}
#endif /* CONTENT_FILTER */

	tp = intotcpcb(inp);
	TCPDEBUG1();

	calculate_tcp_clock();

	if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
		error = EAFNOSUPPORT;
		goto out;
	}
	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sinp = (struct sockaddr_in *)(void *)nam;
	if (sinp->sin_family == AF_INET
	    && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if ((error = tcp_connect(tp, nam, p)) != 0) {
		goto out;
	}

	error = tcp_connect_complete(so);

	COMMON_END(PRU_CONNECT);
}
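/*
 * Common workhorse for connectx(2) on TCP sockets: applies NECP policy,
 * arms TFO for idempotent pre-connect data when allowed, binds to the
 * requested interface and/or source address, dispatches to the AF_INET or
 * AF_INET6 connect path, and finally pushes any user-supplied data through
 * pru_sosend, translating EWOULDBLOCK into EINPROGRESS for blocking sockets.
 */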
static int
tcp_usr_connectx_common(struct socket *so, int af,
    struct sockaddr *src, struct sockaddr *dst,
    struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
    uint32_t flags, void *arg, uint32_t arglen, struct uio *auio,
    user_ssize_t *bytes_written)
{
#pragma unused(aid, flags, arg, arglen)
	struct inpcb *inp = sotoinpcb(so);
	int error = 0;
	user_ssize_t datalen = 0;

	if (inp == NULL) {
		return EINVAL;
	}

	ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
	inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;

#if NECP
	inp_update_necp_policy(inp, src, dst, ifscope);
#endif /* NECP */

	if ((so->so_flags1 & SOF1_DATA_IDEMPOTENT) &&
	    (tcp_fastopen & TCP_FASTOPEN_CLIENT)) {
		sototcpcb(so)->t_flagsext |= TF_FASTOPEN;
	}

	/* bind socket to the specified interface, if requested */
	if (ifscope != IFSCOPE_NONE &&
	    (error = inp_bindif(inp, ifscope, NULL)) != 0) {
		goto done;
	}

	/* if source address and/or port is specified, bind to it */
	if (src != NULL) {
		error = sobindlock(so, src, 0);	/* already locked */
		if (error != 0) {
			goto done;
		}
	}

	switch (af) {
	case AF_INET:
		error = tcp_usr_connect(so, dst, p);
		break;
	case AF_INET6:
		error = tcp6_usr_connect(so, dst, p);
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (error != 0) {
		goto done;
	}

	/* if there is data, copy it */
	if (auio != NULL) {
		socket_unlock(so, 0);

		VERIFY(bytes_written != NULL);

		datalen = uio_resid(auio);
		error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
		    (uio_t)auio, NULL, NULL, 0);
		socket_lock(so, 0);

		if (error == 0 || error == EWOULDBLOCK) {
			*bytes_written = datalen - uio_resid(auio);
		}

		/*
		 * sosend returns EWOULDBLOCK if it's a non-blocking
		 * socket or a timeout occurred (this allows to return
		 * the amount of queued data through sendit()).
		 *
		 * However, connectx() returns EINPROGRESS in case of a
		 * blocking socket. So we change the return value here.
		 */
		if (error == EWOULDBLOCK) {
			error = EINPROGRESS;
		}
	}

	if (error == 0 && pcid != NULL) {
		*pcid = 1; /* there is only one connection in regular TCP */
	}

done:
	if (error && error != EINPROGRESS) {
		so->so_flags1 &= ~SOF1_PRECONNECT_DATA;
	}

	inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
	return error;
}
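/*
 * connectx(2) entry point for AF_INET; a thin wrapper around
 * tcp_usr_connectx_common().
 */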
static int
tcp_usr_connectx(struct socket *so, struct sockaddr *src,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
    sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
	return tcp_usr_connectx_common(so, AF_INET, src, dst, p, ifscope, aid,
	           pcid, flags, arg, arglen, uio, bytes_written);
}
static int
tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct sockaddr_in6 *sin6p;

	TCPDEBUG0;
	if (inp == NULL) {
		return EINVAL;
	} else if (inp->inp_state == INPCB_STATE_DEAD) {
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			return error;
		} else {
			return EINVAL;
		}
	}
#if NECP
#if FLOW_DIVERT
	else if (necp_socket_should_use_flow_divert(inp)) {
		uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp);
		if (fd_ctl_unit > 0) {
			error = flow_divert_pcb_init(so, fd_ctl_unit);
			if (error == 0) {
				error = flow_divert_connect_out(so, nam, p);
			}
		} else {
			error = ENETDOWN;
		}
		return error;
	}
#endif /* FLOW_DIVERT */
#endif /* NECP */

#if CONTENT_FILTER
	error = cfil_sock_attach(so);
	if (error != 0) {
		return error;
	}
#endif /* CONTENT_FILTER */

	tp = intotcpcb(inp);
	TCPDEBUG1();

	calculate_tcp_clock();

	if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
		error = EAFNOSUPPORT;
		goto out;
	}

	/*
	 * Must disallow TCP ``connections'' to multicast addresses.
	 */
	sin6p = (struct sockaddr_in6 *)(void *)nam;
	if (sin6p->sin6_family == AF_INET6
	    && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
		error = EAFNOSUPPORT;
		goto out;
	}

	if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
		struct sockaddr_in sin;

		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
			error = EINVAL;
			goto out;
		}

		in6_sin6_2_sin(&sin, sin6p);
		inp->inp_vflag |= INP_IPV4;
		inp->inp_vflag &= ~INP_IPV6;
		if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0) {
			goto out;
		}

		error = tcp_connect_complete(so);
		goto out;
	}

	inp->inp_vflag &= ~INP_IPV4;
	inp->inp_vflag |= INP_IPV6;
	if ((error = tcp6_connect(tp, nam, p)) != 0) {
		goto out;
	}

	error = tcp_connect_complete(so);
	COMMON_END(PRU_CONNECT);
}
static int
tcp6_usr_connectx(struct socket *so, struct sockaddr *src,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
    sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
	return tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid,
	           pcid, flags, arg, arglen, uio, bytes_written);
}
/*
 * Initiate disconnect from peer.
 * If connection never passed embryonic stage, just drop;
 * else if don't need to let data drain, then can just drop anyways,
 * else have to begin TCP shutdown process: mark socket disconnecting,
 * drain unread data, state switch to reflect user close, and
 * send segment (e.g. FIN) to peer.  Socket will be really disconnected
 * when peer sends FIN and acks ours.
 *
 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
 */
static int
tcp_usr_disconnect(struct socket *so)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	socket_lock_assert_owned(so);
	COMMON_START();
	/* In case we got disconnected from the peer */
	if (tp == NULL) {
		goto out;
	}
	tp = tcp_disconnect(tp);
	COMMON_END(PRU_DISCONNECT);
}
/*
 * User-protocol pru_disconnectx callback.
 */
static int
tcp_usr_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
{
#pragma unused(cid)
	if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
		return EINVAL;
	}

	return tcp_usr_disconnect(so);
}
/*
 * Accept a connection.  Essentially all the work is
 * done at higher levels; just return the address
 * of the peer, storing through addr.
 */
static int
tcp_usr_accept(struct socket *so, struct sockaddr **nam)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = NULL;
	TCPDEBUG0;

	in_getpeeraddr(so, nam);

	if (so->so_state & SS_ISDISCONNECTED) {
		error = ECONNABORTED;
		goto out;
	}
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
		return EINVAL;
	}
#if NECP
	else if (necp_socket_should_use_flow_divert(inp)) {
		return EPROTOTYPE;
	}
#endif /* NECP */
#if CONTENT_FILTER
	error = cfil_sock_attach(so);
	if (error != 0) {
		return error;
	}
#endif /* CONTENT_FILTER */

	tp = intotcpcb(inp);
	TCPDEBUG1();

	calculate_tcp_clock();

	COMMON_END(PRU_ACCEPT);
}
static int
tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = NULL;
	TCPDEBUG0;

	if (so->so_state & SS_ISDISCONNECTED) {
		error = ECONNABORTED;
		goto out;
	}
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
		return EINVAL;
	}
#if NECP
	else if (necp_socket_should_use_flow_divert(inp)) {
		return EPROTOTYPE;
	}
#endif /* NECP */
#if CONTENT_FILTER
	error = cfil_sock_attach(so);
	if (error != 0) {
		return error;
	}
#endif /* CONTENT_FILTER */

	tp = intotcpcb(inp);
	TCPDEBUG1();

	calculate_tcp_clock();

	in6_mapped_peeraddr(so, nam);
	COMMON_END(PRU_ACCEPT);
}
/*
 * Mark the connection as being incapable of further output.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 *	tcp_output:EADDRNOTAVAIL
 *	tcp_output:EMSGSIZE
 *	tcp_output:EHOSTUNREACH
 *	tcp_output:ENETUNREACH
 *	tcp_output:ENETDOWN
 *	tcp_output:EMSGSIZE
 *	tcp_output:???			[ignorable: mostly IPSEC/firewall/DLIL]
 */
static int
tcp_usr_shutdown(struct socket *so)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	TCPDEBUG0;
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
		return EINVAL;
	}

	socantsendmore(so);

	/*
	 * In case we got disconnected from the peer, or if this is
	 * a socket that is to be flow-diverted (but not yet).
	 */
	tp = intotcpcb(inp);
	TCPDEBUG1();

	if (tp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		goto out;
	}

	calculate_tcp_clock();

	tp = tcp_usrclosed(tp);
#if MPTCP
	/* A reset has been sent but socket exists, do not send FIN */
	if ((so->so_flags & SOF_MP_SUBFLOW) &&
	    (tp) && (tp->t_mpflags & TMPF_RESET)) {
		goto out;
	}
#endif /* MPTCP */
#if CONTENT_FILTER
	/* Don't send a FIN yet */
	if (tp && !(so->so_state & SS_ISDISCONNECTED) &&
	    cfil_sock_data_pending(&so->so_snd)) {
		goto out;
	}
#endif /* CONTENT_FILTER */
	if (tp) {
		error = tcp_output(tp);
	}
	COMMON_END(PRU_SHUTDOWN);
}
/*
 * After a receive, possibly send window update to peer.
 */
static int
tcp_usr_rcvd(struct socket *so, __unused int flags)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	COMMON_START();
	/* In case we got disconnected from the peer */
	if (tp == NULL) {
		goto out;
	}
	tcp_sbrcv_trim(tp, &so->so_rcv);

	/*
	 * This tcp_output is solely there to trigger window-updates.
	 * However, we really do not want these window-updates while we
	 * are still in SYN_SENT or SYN_RECEIVED.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		error = tcp_output(tp);
	}

#if CONTENT_FILTER
	cfil_sock_buf_update(&so->so_rcv);
#endif /* CONTENT_FILTER */

	COMMON_END(PRU_RCVD);
}
/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 *
 * Returns:	0			Success
 *	tcp_connect:EADDRINUSE		Address in use
 *	tcp_connect:EADDRNOTAVAIL	Address not available.
 *	tcp_connect:EINVAL		Invalid argument
 *	tcp_connect:EAFNOSUPPORT	Address family not supported [notdef]
 *	tcp_connect:EACCES		Permission denied
 *	tcp_connect:EAGAIN		Resource unavailable, try again
 *	tcp_connect:EPERM		Operation not permitted
 *	tcp_output:EADDRNOTAVAIL
 *	tcp_output:EMSGSIZE
 *	tcp_output:EHOSTUNREACH
 *	tcp_output:ENETUNREACH
 *	tcp_output:ENETDOWN
 *	tcp_output:EMSGSIZE
 *	tcp_output:???			[ignorable: mostly IPSEC/firewall/DLIL]
 *	tcp6_connect:???		[IPV6 only]
 */
static int
tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *nam, struct mbuf *control, struct proc *p)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	uint32_t msgpri = MSG_PRI_DEFAULT;
	int isipv6;
	TCPDEBUG0;

	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		/*
		 * OOPS! we lost a race, the TCP session got reset after
		 * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
		 * network interrupt in the non-splnet() section of sosend().
		 */
		if (m != NULL) {
			m_freem(m);
		}
		if (control != NULL) {
			m_freem(control);
			control = NULL;
		}
		error = ECONNRESET;	/* XXX EPIPE? */
		tp = NULL;
		TCPDEBUG1();
		goto out;
	}
	isipv6 = nam && nam->sa_family == AF_INET6;
	tp = intotcpcb(inp);
	TCPDEBUG1();

	calculate_tcp_clock();

	if (control != NULL) {
		if (so->so_flags & SOF_ENABLE_MSGS) {
			/* Get the msg priority from control mbufs */
			error = tcp_get_msg_priority(control, &msgpri);
			if (error) {
				m_freem(control);
				if (m != NULL) {
					m_freem(m);
				}
				control = NULL;
				m = NULL;
				goto out;
			}
			m_freem(control);
			control = NULL;
		} else if (control->m_len) {
			/*
			 * if not unordered, TCP should not have
			 * control mbufs
			 */
			m_freem(control);
			if (m != NULL) {
				m_freem(m);
			}
			control = NULL;
			m = NULL;
			error = EINVAL;
			goto out;
		}
	}

	if (so->so_flags & SOF_ENABLE_MSGS) {
		VERIFY(m->m_flags & M_PKTHDR);
		m->m_pkthdr.msg_pri = msgpri;
	}

	/* MPTCP subflow socket buffers must not be compressed */
	VERIFY(!(so->so_flags & SOF_MP_SUBFLOW) ||
	    (so->so_snd.sb_flags & SB_NOCOMPRESS));

	if (!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) {
		/* Call msg send if message delivery is enabled */
		if (so->so_flags & SOF_ENABLE_MSGS) {
			sbappendmsg_snd(&so->so_snd, m);
		} else {
			sbappendstream(&so->so_snd, m);
		}

		if (nam && tp->t_state < TCPS_SYN_SENT) {
			/*
			 * Do implied connect if not yet connected,
			 * initialize window to default value, and
			 * initialize maxseg/maxopd using peer's cached
			 * MSS.
			 */
#if INET6
			if (isipv6) {
				error = tcp6_connect(tp, nam, p);
			} else
#endif /* INET6 */
			{
				error = tcp_connect(tp, nam, p);
			}
			if (error) {
				goto out;
			}
			tp->snd_wnd = TTCP_CLIENT_SND_WND;
			tp->max_sndwnd = tp->snd_wnd;
			tcp_mss(tp, -1, IFSCOPE_NONE);
		}

		if (flags & PRUS_EOF) {
			/*
			 * Close the send side of the connection after
			 * the data is sent.
			 */
			socantsendmore(so);
			tp = tcp_usrclosed(tp);
		}
		if (tp != NULL) {
			if (flags & PRUS_MORETOCOME) {
				tp->t_flags |= TF_MORETOCOME;
			}
			error = tcp_output(tp);
			if (flags & PRUS_MORETOCOME) {
				tp->t_flags &= ~TF_MORETOCOME;
			}
		}
	} else {
		if (sbspace(&so->so_snd) == 0) {
			/* if no space is left in sockbuf,
			 * do not try to squeeze in OOB traffic */
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
		/*
		 * According to RFC961 (Assigned Protocols),
		 * the urgent pointer points to the last octet
		 * of urgent data.  We continue, however,
		 * to consider it to indicate the first octet
		 * of data past the urgent section.
		 * Otherwise, snd_up should be one lower.
		 */
		sbappendstream(&so->so_snd, m);
		if (nam && tp->t_state < TCPS_SYN_SENT) {
			/*
			 * Do implied connect if not yet connected,
			 * initialize window to default value, and
			 * initialize maxseg/maxopd using peer's cached
			 * MSS.
			 */
#if INET6
			if (isipv6) {
				error = tcp6_connect(tp, nam, p);
			} else
#endif /* INET6 */
			{
				error = tcp_connect(tp, nam, p);
			}
			if (error) {
				goto out;
			}
			tp->snd_wnd = TTCP_CLIENT_SND_WND;
			tp->max_sndwnd = tp->snd_wnd;
			tcp_mss(tp, -1, IFSCOPE_NONE);
		}
		tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
		tp->t_flagsext |= TF_FORCE;
		error = tcp_output(tp);
		tp->t_flagsext &= ~TF_FORCE;
	}

	/*
	 * We wait for the socket to successfully connect before returning.
	 * This allows us to signal a timeout to the application.
	 */
	if (so->so_state & SS_ISCONNECTING) {
		if (so->so_state & SS_NBIO) {
			error = EWOULDBLOCK;
		} else {
			error = sbwait(&so->so_snd);
		}
	}

	COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
	    ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
}
static int
tcp_usr_abort(struct socket *so)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	COMMON_START();
	/* In case we got disconnected from the peer */
	if (tp == NULL) {
		goto out;
	}
	tp = tcp_drop(tp, ECONNABORTED);
	VERIFY(so->so_usecount > 0);
	so->so_usecount--;
	COMMON_END(PRU_ABORT);
}
/*
 * Receive out-of-band data.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 */
static int
tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	int error = 0;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;

	COMMON_START();
	if ((so->so_oobmark == 0 &&
	    (so->so_state & SS_RCVATMARK) == 0) ||
	    so->so_options & SO_OOBINLINE ||
	    tp->t_oobflags & TCPOOB_HADDATA) {
		error = EINVAL;
		goto out;
	}
	if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
		error = EWOULDBLOCK;
		goto out;
	}
	m->m_len = 1;
	*mtod(m, caddr_t) = tp->t_iobc;
	so->so_state &= ~SS_RCVATMARK;
	if ((flags & MSG_PEEK) == 0) {
		tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
	}
	COMMON_END(PRU_RCVOOB);
}
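/*
 * pru_preconnect callback: push data that was queued before the connection
 * was established (TFO / pre-connect data) via tcp_output(), then clear the
 * fast-open bookkeeping on the socket.
 */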
static int
tcp_usr_preconnect(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	int error = 0;

#if NECP
	if (necp_socket_should_use_flow_divert(inp)) {
		/* May happen, if in tcp_usr_connect we did not have a chance
		 * to set the usrreqs (due to some error). So, let's get out
		 * of here.
		 */
		goto out;
	}
#endif /* NECP */

	error = tcp_output(sototcpcb(so));

	soclearfastopen(so);

	COMMON_END(PRU_PRECONNECT);
}
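/*
 * Protocol user-request dispatch tables wired into the TCP protosw entries:
 * one for the IPv4 socket layer and one for IPv6, the latter using the
 * mapped-address aware accept/peeraddr/sockaddr helpers.
 */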
/* xxx - should be const */
struct pr_usrreqs tcp_usrreqs = {
	.pru_abort = tcp_usr_abort,
	.pru_accept = tcp_usr_accept,
	.pru_attach = tcp_usr_attach,
	.pru_bind = tcp_usr_bind,
	.pru_connect = tcp_usr_connect,
	.pru_connectx = tcp_usr_connectx,
	.pru_control = in_control,
	.pru_detach = tcp_usr_detach,
	.pru_disconnect = tcp_usr_disconnect,
	.pru_disconnectx = tcp_usr_disconnectx,
	.pru_listen = tcp_usr_listen,
	.pru_peeraddr = in_getpeeraddr,
	.pru_rcvd = tcp_usr_rcvd,
	.pru_rcvoob = tcp_usr_rcvoob,
	.pru_send = tcp_usr_send,
	.pru_shutdown = tcp_usr_shutdown,
	.pru_sockaddr = in_getsockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
	.pru_preconnect = tcp_usr_preconnect,
};

struct pr_usrreqs tcp6_usrreqs = {
	.pru_abort = tcp_usr_abort,
	.pru_accept = tcp6_usr_accept,
	.pru_attach = tcp_usr_attach,
	.pru_bind = tcp6_usr_bind,
	.pru_connect = tcp6_usr_connect,
	.pru_connectx = tcp6_usr_connectx,
	.pru_control = in6_control,
	.pru_detach = tcp_usr_detach,
	.pru_disconnect = tcp_usr_disconnect,
	.pru_disconnectx = tcp_usr_disconnectx,
	.pru_listen = tcp6_usr_listen,
	.pru_peeraddr = in6_mapped_peeraddr,
	.pru_rcvd = tcp_usr_rcvd,
	.pru_rcvoob = tcp_usr_rcvoob,
	.pru_send = tcp_usr_send,
	.pru_shutdown = tcp_usr_shutdown,
	.pru_sockaddr = in6_mapped_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
	.pru_preconnect = tcp_usr_preconnect,
};
/*
 * Common subroutine to open a TCP connection to remote host specified
 * by struct sockaddr_in in mbuf *nam.  Call in_pcbbind to assign a local
 * port number if needed.  Call in_pcbladdr to do the routing and to choose
 * a local host address (interface).  If there is an existing incarnation
 * of the same connection in TIME-WAIT state and if the remote host was
 * sending CC options and if the connection duration was < MSL, then
 * truncate the previous TIME-WAIT state and proceed.
 * Initialize connection parameters and enter SYN-SENT state.
 *
 * Returns:	0			Success
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 *	in_pcbladdr:EINVAL		Invalid argument
 *	in_pcbladdr:EAFNOSUPPORT	Address family not supported
 *	in_pcbladdr:EADDRNOTAVAIL	Address not available
 */
static int
tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *otp;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
	struct in_addr laddr;
	int error = 0;
	struct ifnet *outif = NULL;

	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, p);
		if (error) {
			goto done;
		}
	}

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	error = in_pcbladdr(inp, nam, &laddr, IFSCOPE_NONE, &outif, 0);
	if (error) {
		goto done;
	}

	socket_unlock(inp->inp_socket, 0);
	oinp = in_pcblookup_hash(inp->inp_pcbinfo,
	    sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr : laddr,
	    inp->inp_lport, 0, NULL);

	socket_lock(inp->inp_socket, 0);
	if (oinp) {
		if (oinp != inp) { /* 4143933: avoid deadlock if inp == oinp */
			socket_lock(oinp->inp_socket, 1);
		}

		if (in_pcb_checkstate(oinp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			if (oinp != inp) {
				socket_unlock(oinp->inp_socket, 1);
			}
			goto skip_oinp;
		}

		if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
		    otp->t_state == TCPS_TIME_WAIT &&
		    ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
		    (otp->t_flags & TF_RCVD_CC)) {
			otp = tcp_close(otp);
		} else {
			printf("tcp_connect: inp=0x%llx err=EADDRINUSE\n",
			    (uint64_t)VM_KERNEL_ADDRPERM(inp));
			if (oinp != inp) {
				socket_unlock(oinp->inp_socket, 1);
			}
			error = EADDRINUSE;
			goto done;
		}
		if (oinp != inp) {
			socket_unlock(oinp->inp_socket, 1);
		}
	}
skip_oinp:
	if ((inp->inp_laddr.s_addr == INADDR_ANY ? laddr.s_addr :
	    inp->inp_laddr.s_addr) == sin->sin_addr.s_addr &&
	    inp->inp_lport == sin->sin_port) {
		error = EINVAL;
		goto done;
	}
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
		socket_lock(inp->inp_socket, 0);
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		inp->inp_laddr = laddr;
		/* no reference needed */
		inp->inp_last_outifp = outif;
		inp->inp_flags |= INP_INADDR_ANY;
	}
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);

	if (inp->inp_flowhash == 0) {
		inp->inp_flowhash = inp_calc_flowhash(inp);
	}

	tcp_set_max_rwinscale(tp, so, outif);

	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPINIT(tp));
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);
	if (nstat_collect) {
		nstat_route_connect_attempt(inp->inp_route.ro_rt);
	}

done:
	if (outif != NULL) {
		ifnet_release(outif);
	}

	return error;
}
static int
tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *otp;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)nam;
	struct in6_addr addr6;
	int error = 0;
	struct ifnet *outif = NULL;

	if (inp->inp_lport == 0) {
		error = in6_pcbbind(inp, NULL, p);
		if (error) {
			goto done;
		}
	}

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 *
	 * in6_pcbladdr() might return an ifp with its reference held
	 * even in the error case, so make sure that it's released
	 * whenever it's non-NULL.
	 */
	error = in6_pcbladdr(inp, nam, &addr6, &outif);
	if (error) {
		goto done;
	}
	socket_unlock(inp->inp_socket, 0);
	oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
	    &sin6->sin6_addr, sin6->sin6_port,
	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
	    ? &addr6
	    : &inp->in6p_laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(inp->inp_socket, 0);
	if (oinp) {
		if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
		    otp->t_state == TCPS_TIME_WAIT &&
		    ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
		    (otp->t_flags & TF_RCVD_CC)) {
			otp = tcp_close(otp);
		} else {
			error = EADDRINUSE;
			goto done;
		}
	}
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
		socket_lock(inp->inp_socket, 0);
	}
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
		inp->in6p_laddr = addr6;
		inp->in6p_last_outifp = outif;	/* no reference needed */
		inp->in6p_flags |= INP_IN6ADDR_ANY;
	}
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0) {
		inp->inp_flow = sin6->sin6_flowinfo;
	}
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->ipi_lock);

	if (inp->inp_flowhash == 0) {
		inp->inp_flowhash = inp_calc_flowhash(inp);
	}
	/* update flowinfo - RFC 6437 */
	if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
		inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
		inp->inp_flow |=
		    (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
	}

	tcp_set_max_rwinscale(tp, so, outif);

	soisconnecting(so);
	tcpstat.tcps_connattempt++;
	tp->t_state = TCPS_SYN_SENT;
	tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
	    TCP_CONN_KEEPINIT(tp));
	tp->iss = tcp_new_isn(tp);
	tcp_sendseqinit(tp);
	if (nstat_collect) {
		nstat_route_connect_attempt(inp->inp_route.ro_rt);
	}

done:
	if (outif != NULL) {
		ifnet_release(outif);
	}

	return error;
}
/*
 * Export TCP internal state information via a struct tcp_info
 */
__private_extern__ void
tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{
	struct inpcb *inp = tp->t_inpcb;

	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	ti->tcpi_flowhash = inp->inp_flowhash;

	if (tp->t_state > TCPS_LISTEN) {
		if (TSTMP_SUPPORTED(tp)) {
			ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
		}
		if (SACK_ENABLED(tp)) {
			ti->tcpi_options |= TCPI_OPT_SACK;
		}
		if (TCP_WINDOW_SCALE_ENABLED(tp)) {
			ti->tcpi_options |= TCPI_OPT_WSCALE;
			ti->tcpi_snd_wscale = tp->snd_scale;
			ti->tcpi_rcv_wscale = tp->rcv_scale;
		}
		if (TCP_ECN_ENABLED(tp)) {
			ti->tcpi_options |= TCPI_OPT_ECN;
		}

		/* Are we in a retransmission episode? */
		if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) {
			ti->tcpi_flags |= TCPI_FLAG_LOSSRECOVERY;
		}

		if (tp->t_flags & TF_STREAMING_ON) {
			ti->tcpi_flags |= TCPI_FLAG_STREAMING_ON;
		}

		ti->tcpi_rto = tp->t_timer[TCPT_REXMT] ? tp->t_rxtcur : 0;
		ti->tcpi_snd_mss = tp->t_maxseg;
		ti->tcpi_rcv_mss = tp->t_maxseg;

		ti->tcpi_rttcur = tp->t_rttcur;
		ti->tcpi_srtt = tp->t_srtt >> TCP_RTT_SHIFT;
		ti->tcpi_rttvar = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
		ti->tcpi_rttbest = tp->t_rttbest >> TCP_RTT_SHIFT;

		ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
		ti->tcpi_snd_cwnd = tp->snd_cwnd;
		ti->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;

		ti->tcpi_rcv_space = tp->rcv_wnd;

		ti->tcpi_snd_wnd = tp->snd_wnd;
		ti->tcpi_snd_nxt = tp->snd_nxt;
		ti->tcpi_rcv_nxt = tp->rcv_nxt;

		/* convert bytes/msec to bits/sec */
		if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
		    tp->t_bwmeas != NULL) {
			ti->tcpi_snd_bw = (tp->t_bwmeas->bw_sndbw * 8000);
		}

		ti->tcpi_last_outif = (tp->t_inpcb->inp_last_outifp == NULL) ? 0 :
		    tp->t_inpcb->inp_last_outifp->if_index;

		//atomic_get_64(ti->tcpi_txbytes, &inp->inp_stat->txbytes);
		ti->tcpi_txpackets = inp->inp_stat->txpackets;
		ti->tcpi_txbytes = inp->inp_stat->txbytes;
		ti->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
		ti->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
		ti->tcpi_txunacked = tp->snd_max - tp->snd_una;

		//atomic_get_64(ti->tcpi_rxbytes, &inp->inp_stat->rxbytes);
		ti->tcpi_rxpackets = inp->inp_stat->rxpackets;
		ti->tcpi_rxbytes = inp->inp_stat->rxbytes;
		ti->tcpi_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
		ti->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;

		if (tp->t_state > TCPS_LISTEN) {
			ti->tcpi_synrexmits = tp->t_stat.synrxtshift;
		}
		ti->tcpi_cell_rxpackets = inp->inp_cstat->rxpackets;
		ti->tcpi_cell_rxbytes = inp->inp_cstat->rxbytes;
		ti->tcpi_cell_txpackets = inp->inp_cstat->txpackets;
		ti->tcpi_cell_txbytes = inp->inp_cstat->txbytes;

		ti->tcpi_wifi_rxpackets = inp->inp_wstat->rxpackets;
		ti->tcpi_wifi_rxbytes = inp->inp_wstat->rxbytes;
		ti->tcpi_wifi_txpackets = inp->inp_wstat->txpackets;
		ti->tcpi_wifi_txbytes = inp->inp_wstat->txbytes;

		ti->tcpi_wired_rxpackets = inp->inp_Wstat->rxpackets;
		ti->tcpi_wired_rxbytes = inp->inp_Wstat->rxbytes;
		ti->tcpi_wired_txpackets = inp->inp_Wstat->txpackets;
		ti->tcpi_wired_txbytes = inp->inp_Wstat->txbytes;
		tcp_get_connectivity_status(tp, &ti->tcpi_connstatus);

		ti->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
		ti->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
		ti->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
		ti->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);

		ti->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
		ti->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
		ti->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
		ti->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
		ti->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
		ti->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
		ti->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
		ti->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
		ti->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
		ti->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
		ti->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);

		ti->tcpi_ecn_client_setup = !!(tp->ecn_flags & TE_SETUPSENT);
		ti->tcpi_ecn_server_setup = !!(tp->ecn_flags & TE_SETUPRECEIVED);
		ti->tcpi_ecn_success = (tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON ? 1 : 0;
		ti->tcpi_ecn_lost_syn = !!(tp->ecn_flags & TE_LOST_SYN);
		ti->tcpi_ecn_lost_synack = !!(tp->ecn_flags & TE_LOST_SYNACK);

		ti->tcpi_local_peer = !!(tp->t_flags & TF_LOCAL);

		if (tp->t_inpcb->inp_last_outifp != NULL) {
			if (IFNET_IS_CELLULAR(tp->t_inpcb->inp_last_outifp)) {
				ti->tcpi_if_cell = 1;
			}
			if (IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
				ti->tcpi_if_wifi = 1;
			}
			if (IFNET_IS_WIRED(tp->t_inpcb->inp_last_outifp)) {
				ti->tcpi_if_wired = 1;
			}
			if (IFNET_IS_WIFI_INFRA(tp->t_inpcb->inp_last_outifp)) {
				ti->tcpi_if_wifi_infra = 1;
			}
			if (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) {
				ti->tcpi_if_wifi_awdl = 1;
			}
		}
		if (tp->tcp_cc_index == TCP_CC_ALGO_BACKGROUND_INDEX) {
			ti->tcpi_snd_background = 1;
		}
		if (tcp_recv_bg == 1 ||
		    IS_TCP_RECV_BG(tp->t_inpcb->inp_socket)) {
			ti->tcpi_rcv_background = 1;
		}

		ti->tcpi_ecn_recv_ce = tp->t_ecn_recv_ce;
		ti->tcpi_ecn_recv_cwr = tp->t_ecn_recv_cwr;

		ti->tcpi_rcvoopack = tp->t_rcvoopack;
		ti->tcpi_pawsdrop = tp->t_pawsdrop;
		ti->tcpi_sack_recovery_episode = tp->t_sack_recovery_episode;
		ti->tcpi_reordered_pkts = tp->t_reordered_pkts;
		ti->tcpi_dsack_sent = tp->t_dsack_sent;
		ti->tcpi_dsack_recvd = tp->t_dsack_recvd;
	}
}
__private_extern__ errno_t
tcp_fill_info_for_info_tuple(struct info_tuple *itpl, struct tcp_info *ti)
{
	struct inpcbinfo *pcbinfo = NULL;
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	if (itpl->itpl_proto == IPPROTO_TCP) {
		pcbinfo = &tcbinfo;
	} else {
		return EINVAL;
	}

	if (itpl->itpl_local_sa.sa_family == AF_INET &&
	    itpl->itpl_remote_sa.sa_family == AF_INET) {
		inp = in_pcblookup_hash(pcbinfo,
		    itpl->itpl_remote_sin.sin_addr,
		    itpl->itpl_remote_sin.sin_port,
		    itpl->itpl_local_sin.sin_addr,
		    itpl->itpl_local_sin.sin_port,
		    0, NULL);
	} else if (itpl->itpl_local_sa.sa_family == AF_INET6 &&
	    itpl->itpl_remote_sa.sa_family == AF_INET6) {
		struct in6_addr ina6_local;
		struct in6_addr ina6_remote;

		ina6_local = itpl->itpl_local_sin6.sin6_addr;
		if (IN6_IS_SCOPE_LINKLOCAL(&ina6_local) &&
		    itpl->itpl_local_sin6.sin6_scope_id) {
			ina6_local.s6_addr16[1] = htons(itpl->itpl_local_sin6.sin6_scope_id);
		}

		ina6_remote = itpl->itpl_remote_sin6.sin6_addr;
		if (IN6_IS_SCOPE_LINKLOCAL(&ina6_remote) &&
		    itpl->itpl_remote_sin6.sin6_scope_id) {
			ina6_remote.s6_addr16[1] = htons(itpl->itpl_remote_sin6.sin6_scope_id);
		}

		inp = in6_pcblookup_hash(pcbinfo,
		    &ina6_remote,
		    itpl->itpl_remote_sin6.sin6_port,
		    &ina6_local,
		    itpl->itpl_local_sin6.sin6_port,
		    0, NULL);
	} else {
		return EINVAL;
	}
	if (inp == NULL || (so = inp->inp_socket) == NULL) {
		return ENOENT;
	}

	socket_lock(so, 0);
	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		socket_unlock(so, 0);
		return ENOENT;
	}
	tp = intotcpcb(inp);

	tcp_fill_info(tp, ti);
	socket_unlock(so, 0);

	return 0;
}
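/*
 * Export a subset of the TCP state through the TCP_CONNECTION_INFO socket
 * option (struct tcp_connection_info), mirroring tcp_fill_info() above.
 */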
static void
tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci)
{
	struct inpcb *inp = tp->t_inpcb;

	bzero(tci, sizeof(*tci));
	tci->tcpi_state = tp->t_state;
	if (tp->t_state > TCPS_LISTEN) {
		if (TSTMP_SUPPORTED(tp)) {
			tci->tcpi_options |= TCPCI_OPT_TIMESTAMPS;
		}
		if (SACK_ENABLED(tp)) {
			tci->tcpi_options |= TCPCI_OPT_SACK;
		}
		if (TCP_WINDOW_SCALE_ENABLED(tp)) {
			tci->tcpi_options |= TCPCI_OPT_WSCALE;
			tci->tcpi_snd_wscale = tp->snd_scale;
			tci->tcpi_rcv_wscale = tp->rcv_scale;
		}
		if (TCP_ECN_ENABLED(tp)) {
			tci->tcpi_options |= TCPCI_OPT_ECN;
		}
		if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) {
			tci->tcpi_flags |= TCPCI_FLAG_LOSSRECOVERY;
		}
		if (tp->t_flagsext & TF_PKTS_REORDERED) {
			tci->tcpi_flags |= TCPCI_FLAG_REORDERING_DETECTED;
		}
		tci->tcpi_rto = (tp->t_timer[TCPT_REXMT] > 0) ?
		    tp->t_rxtcur : 0;
		tci->tcpi_maxseg = tp->t_maxseg;
		tci->tcpi_snd_ssthresh = tp->snd_ssthresh;
		tci->tcpi_snd_cwnd = tp->snd_cwnd;
		tci->tcpi_snd_wnd = tp->snd_wnd;
		tci->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;
		tci->tcpi_rcv_wnd = tp->rcv_wnd;
		tci->tcpi_rttcur = tp->t_rttcur;
		tci->tcpi_srtt = (tp->t_srtt >> TCP_RTT_SHIFT);
		tci->tcpi_rttvar = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
		tci->tcpi_txpackets = inp->inp_stat->txpackets;
		tci->tcpi_txbytes = inp->inp_stat->txbytes;
		tci->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
		tci->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
		tci->tcpi_rxpackets = inp->inp_stat->rxpackets;
		tci->tcpi_rxbytes = inp->inp_stat->rxbytes;
		tci->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;

		tci->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
		tci->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
		tci->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
		tci->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);
		tci->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
		tci->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
		tci->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
		tci->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
		tci->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
		tci->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
		tci->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
		tci->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
		tci->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
		tci->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
		tci->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);
	}
}
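/*
 * sysctl handler for net.inet.tcp.info: copy in a struct info_tuple that
 * identifies the connection, look up the matching PCB, and copy the
 * corresponding struct tcp_info back out to the caller.
 */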
__private_extern__ int
tcp_sysctl_info(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct tcp_info ti = {};
	struct info_tuple itpl;

#if !CONFIG_EMBEDDED
	proc_t caller = PROC_NULL;
	proc_t caller_parent = PROC_NULL;
	char command_name[MAXCOMLEN + 1] = "";
	char parent_name[MAXCOMLEN + 1] = "";

	if ((caller = proc_self()) != PROC_NULL) {
		/* get process name */
		strlcpy(command_name, caller->p_comm, sizeof(command_name));

		/* get parent process name if possible */
		if ((caller_parent = proc_find(caller->p_ppid)) != PROC_NULL) {
			strlcpy(parent_name, caller_parent->p_comm,
			    sizeof(parent_name));
			proc_rele(caller_parent);
		}

		if ((escape_str(command_name, strlen(command_name) + 1,
		    sizeof(command_name)) == 0) &&
		    (escape_str(parent_name, strlen(parent_name) + 1,
		    sizeof(parent_name)) == 0)) {
			kern_asl_msg(LOG_DEBUG, "messagetracer",
			    5,
			    "com.apple.message.domain",
			    "com.apple.kernel.tcpstat",		/* 1 */
			    "com.apple.message.signature",
			    "tcpinfo",				/* 2 */
			    "com.apple.message.signature2", command_name,	/* 3 */
			    "com.apple.message.signature3", parent_name,	/* 4 */
			    "com.apple.message.summarize", "YES",	/* 5 */
			    NULL);
		}
	}

	if (caller != PROC_NULL) {
		proc_rele(caller);
	}
#endif /* !CONFIG_EMBEDDED */

	if (req->newptr == USER_ADDR_NULL) {
		return EINVAL;
	}
	if (req->newlen < sizeof(struct info_tuple)) {
		return EINVAL;
	}
	error = SYSCTL_IN(req, &itpl, sizeof(struct info_tuple));
	if (error != 0) {
		return error;
	}
	error = tcp_fill_info_for_info_tuple(&itpl, &ti);
	if (error != 0) {
		return error;
	}
	error = SYSCTL_OUT(req, &ti, sizeof(struct tcp_info));
	if (error != 0) {
		return error;
	}

	return 0;
}
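/*
 * Look up the PID that owns the peer endpoint of a connected (typically
 * loopback) TCP socket by searching the TCP PCB hash with the reversed
 * four-tuple; returns EHOSTUNREACH when no local peer PCB is found.
 */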
static int
tcp_lookup_peer_pid_locked(struct socket *so, pid_t *out_pid)
{
	int error = EHOSTUNREACH;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		return error;
	}

	struct inpcb *inp = (struct inpcb *)so->so_pcb;
	uint16_t lport = inp->inp_lport;
	uint16_t fport = inp->inp_fport;
	struct inpcb *finp = NULL;
	struct in6_addr laddr6, faddr6;
	struct in_addr laddr4, faddr4;

	if (inp->inp_vflag & INP_IPV6) {
		laddr6 = inp->in6p_laddr;
		faddr6 = inp->in6p_faddr;
	} else if (inp->inp_vflag & INP_IPV4) {
		laddr4 = inp->inp_laddr;
		faddr4 = inp->inp_faddr;
	}

	socket_unlock(so, 0);
	if (inp->inp_vflag & INP_IPV6) {
		finp = in6_pcblookup_hash(&tcbinfo, &laddr6, lport, &faddr6, fport, 0, NULL);
	} else if (inp->inp_vflag & INP_IPV4) {
		finp = in_pcblookup_hash(&tcbinfo, laddr4, lport, faddr4, fport, 0, NULL);
	}

	if (finp) {
		*out_pid = finp->inp_socket->last_pid;
		error = 0;
		in_pcb_checkstate(finp, WNT_RELEASE, 0);
	}
	socket_lock(so, 0);

	return error;
}
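/*
 * Fill a struct conninfo_tcp for the connection-info interface: the peer's
 * PID (when it is a local peer) plus the full tcp_info snapshot.
 */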
void
tcp_getconninfo(struct socket *so, struct conninfo_tcp *tcp_ci)
{
	(void) tcp_lookup_peer_pid_locked(so, &tcp_ci->tcpci_peer_pid);
	tcp_fill_info(sototcpcb(so), &tcp_ci->tcpci_tcp_info);
}
/*
 * The new sockopt interface makes it possible for us to block in the
 * copyin/out step (if we take a page fault).  Taking a page fault at
 * splnet() is probably a Bad Thing.  (Since sockets and pcbs both now
 * use TSM, there probably isn't any need for this function to run at
 * splnet() any more.  This needs more examination.)
 */
int
tcp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error = 0, opt = 0, optval = 0;
	struct inpcb *inp;
	struct tcpcb *tp;

	inp = sotoinpcb(so);
	if (inp == NULL) {
		return ECONNRESET;
	}
	/* Allow <SOL_SOCKET,SO_FLUSH/SO_TRAFFIC_MGT_BACKGROUND> at this level */
	if (sopt->sopt_level != IPPROTO_TCP &&
	    !(sopt->sopt_level == SOL_SOCKET && (sopt->sopt_name == SO_FLUSH ||
	    sopt->sopt_name == SO_TRAFFIC_MGT_BACKGROUND))) {
		if (SOCK_CHECK_DOM(so, PF_INET6)) {
			error = ip6_ctloutput(so, sopt);
		} else {
			error = ip_ctloutput(so, sopt);
		}
		return error;
	}
	tp = intotcpcb(inp);
	if (tp == NULL) {
		return ECONNRESET;
	}

	calculate_tcp_clock();

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
2011 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2017 switch (sopt
->sopt_name
) {
2028 opt
= 0; /* dead code to fool gcc */
2035 tp
->t_flags
&= ~opt
;
2038 case TCP_RXT_FINDROP
:
2039 case TCP_NOTIMEWAIT
:
2040 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2045 switch (sopt
->sopt_name
) {
2046 case TCP_RXT_FINDROP
:
2047 opt
= TF_RXTFINDROP
;
2049 case TCP_NOTIMEWAIT
:
2050 opt
= TF_NOTIMEWAIT
;
2057 tp
->t_flagsext
|= opt
;
2059 tp
->t_flagsext
&= ~opt
;
2062 case TCP_MEASURE_SND_BW
:
2063 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2068 opt
= TF_MEASURESNDBW
;
2070 if (tp
->t_bwmeas
== NULL
) {
2071 tp
->t_bwmeas
= tcp_bwmeas_alloc(tp
);
2072 if (tp
->t_bwmeas
== NULL
) {
2077 tp
->t_flagsext
|= opt
;
2079 tp
->t_flagsext
&= ~opt
;
2080 /* Reset snd bw measurement state */
2081 tp
->t_flagsext
&= ~(TF_BWMEAS_INPROGRESS
);
2082 if (tp
->t_bwmeas
!= NULL
) {
2083 tcp_bwmeas_free(tp
);
2087 case TCP_MEASURE_BW_BURST
: {
2088 struct tcp_measure_bw_burst in
;
2089 uint32_t minpkts
, maxpkts
;
2090 bzero(&in
, sizeof(in
));
2092 error
= sooptcopyin(sopt
, &in
, sizeof(in
),
2097 if ((tp
->t_flagsext
& TF_MEASURESNDBW
) == 0 ||
2098 tp
->t_bwmeas
== NULL
) {
2102 minpkts
= (in
.min_burst_size
!= 0) ? in
.min_burst_size
:
2103 tp
->t_bwmeas
->bw_minsizepkts
;
2104 maxpkts
= (in
.max_burst_size
!= 0) ? in
.max_burst_size
:
2105 tp
->t_bwmeas
->bw_maxsizepkts
;
2106 if (minpkts
> maxpkts
) {
2110 tp
->t_bwmeas
->bw_minsizepkts
= minpkts
;
2111 tp
->t_bwmeas
->bw_maxsizepkts
= maxpkts
;
2112 tp
->t_bwmeas
->bw_minsize
= (minpkts
* tp
->t_maxseg
);
2113 tp
->t_bwmeas
->bw_maxsize
= (maxpkts
* tp
->t_maxseg
);
2117 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2123 if (optval
> 0 && optval
<= tp
->t_maxseg
&&
2124 optval
+ 40 >= tcp_minmss
) {
2125 tp
->t_maxseg
= optval
;
2132 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2137 if (optval
< 0 || optval
> UINT32_MAX
/ TCP_RETRANSHZ
) {
2140 tp
->t_keepidle
= optval
* TCP_RETRANSHZ
;
2141 /* reset the timer to new value */
2142 tp
->t_timer
[TCPT_KEEP
] = OFFSET_FROM_START(tp
,
2143 TCP_CONN_KEEPIDLE(tp
));
2144 tcp_check_timer_state(tp
);
2148 case TCP_CONNECTIONTIMEOUT
:
2149 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2154 if (optval
< 0 || optval
> UINT32_MAX
/ TCP_RETRANSHZ
) {
2157 tp
->t_keepinit
= optval
* TCP_RETRANSHZ
;
2158 if (tp
->t_state
== TCPS_SYN_RECEIVED
||
2159 tp
->t_state
== TCPS_SYN_SENT
) {
2160 tp
->t_timer
[TCPT_KEEP
] = OFFSET_FROM_START(tp
,
2161 TCP_CONN_KEEPINIT(tp
));
2162 tcp_check_timer_state(tp
);
2168 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2173 if (optval
< 0 || optval
> UINT32_MAX
/ TCP_RETRANSHZ
) {
2176 tp
->t_keepintvl
= optval
* TCP_RETRANSHZ
;
2177 if (tp
->t_state
== TCPS_FIN_WAIT_2
&&
2178 TCP_CONN_MAXIDLE(tp
) > 0) {
2179 tp
->t_timer
[TCPT_2MSL
] = OFFSET_FROM_START(tp
,
2180 TCP_CONN_MAXIDLE(tp
));
2181 tcp_check_timer_state(tp
);
2187 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2192 if (optval
< 0 || optval
> INT32_MAX
) {
2195 tp
->t_keepcnt
= optval
;
2196 if (tp
->t_state
== TCPS_FIN_WAIT_2
&&
2197 TCP_CONN_MAXIDLE(tp
) > 0) {
2198 tp
->t_timer
[TCPT_2MSL
] = OFFSET_FROM_START(tp
,
2199 TCP_CONN_MAXIDLE(tp
));
2200 tcp_check_timer_state(tp
);
2205 case TCP_KEEPALIVE_OFFLOAD
:
2206 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2211 if (optval
< 0 || optval
> INT32_MAX
) {
2216 inp
->inp_flags2
|= INP2_KEEPALIVE_OFFLOAD
;
2218 inp
->inp_flags2
&= ~INP2_KEEPALIVE_OFFLOAD
;
2222 case PERSIST_TIMEOUT
:
2223 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2231 tp
->t_persist_timeout
= optval
* TCP_RETRANSHZ
;
2234 case TCP_RXT_CONNDROPTIME
:
2235 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2243 tp
->t_rxt_conndroptime
= optval
* TCP_RETRANSHZ
;
2246 case TCP_NOTSENT_LOWAT
:
2247 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2257 so
->so_flags
&= ~(SOF_NOTSENT_LOWAT
);
2258 tp
->t_notsent_lowat
= 0;
2260 so
->so_flags
|= SOF_NOTSENT_LOWAT
;
2261 tp
->t_notsent_lowat
= optval
;
2265 case TCP_ADAPTIVE_READ_TIMEOUT
:
2266 error
= sooptcopyin(sopt
, &optval
, sizeof(optval
),
2272 optval
> TCP_ADAPTIVE_TIMEOUT_MAX
) {
2275 } else if (optval
== 0) {
2276 tp
->t_adaptive_rtimo
= 0;
2277 tcp_keepalive_reset(tp
);
2280 mptcp_reset_keepalive(tp
);
2283 tp
->t_adaptive_rtimo
= optval
;
		case TCP_ADAPTIVE_WRITE_TIMEOUT:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 ||
			    optval > TCP_ADAPTIVE_TIMEOUT_MAX) {
				error = EINVAL;
				break;
			}
			tp->t_adaptive_wtimo = optval;
			break;
		case TCP_ENABLE_MSGS:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 || optval > 1) {
				error = EINVAL;
			} else if (optval == 1) {
				/*
				 * Check if messages option is already
				 * enabled, if so return.
				 */
				if (so->so_flags & SOF_ENABLE_MSGS) {
					VERIFY(so->so_msg_state != NULL);
					break;
				}
				/*
				 * allocate memory for storing message
				 * related state
				 */
				VERIFY(so->so_msg_state == NULL);
				MALLOC(so->so_msg_state,
				    struct msg_state *,
				    sizeof(struct msg_state),
				    M_TEMP, M_WAITOK | M_ZERO);
				if (so->so_msg_state == NULL) {
					error = ENOMEM;
					break;
				}
				/* Enable message delivery */
				so->so_flags |= SOF_ENABLE_MSGS;
			} else {
				/*
				 * Can't disable message delivery on socket
				 * because of restrictions imposed by
				 * encoding/decoding
				 */
				error = EINVAL;
			}
			break;
		case TCP_SENDMOREACKS:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 || optval > 1) {
				error = EINVAL;
			} else if (optval == 0) {
				tp->t_flagsext &= ~(TF_NOSTRETCHACK);
			} else {
				tp->t_flagsext |= TF_NOSTRETCHACK;
			}
			break;
		case TCP_DISABLE_BLACKHOLE_DETECTION:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 || optval > 1) {
				error = EINVAL;
			} else if (optval == 0) {
				tp->t_flagsext &= ~TF_NOBLACKHOLE_DETECTION;
			} else {
				tp->t_flagsext |= TF_NOBLACKHOLE_DETECTION;
				if ((tp->t_flags & TF_BLACKHOLE) &&
				    tp->t_pmtud_saved_maxopd > 0) {
					tcp_pmtud_revert_segment_size(tp);
				}
			}
			break;
		case TCP_FASTOPEN:
			if (!(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
				error = ENOTSUP;
				break;
			}

			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 || optval > 1) {
				error = EINVAL;
				break;
			}
			if (tp->t_state != TCPS_LISTEN) {
				error = EINVAL;
				break;
			}
			if (optval) {
				tp->t_flagsext |= TF_FASTOPEN;
			} else {
				tcp_disable_tfo(tp);
			}
			break;
		case TCP_FASTOPEN_FORCE_HEURISTICS:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0 || optval > 1) {
				error = EINVAL;
				break;
			}
			if (tp->t_state != TCPS_CLOSED) {
				error = EINVAL;
				break;
			}
			if (optval) {
				tp->t_flagsext |= TF_FASTOPEN_HEUR;
			} else {
				tp->t_flagsext &= ~TF_FASTOPEN_HEUR;
			}
			break;
		case TCP_ENABLE_ECN:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval) {
				tp->ecn_flags |= TE_ECN_MODE_ENABLE;
				tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
			} else {
				tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
				tp->ecn_flags |= TE_ECN_MODE_DISABLE;
			}
			break;
		case TCP_ECN_MODE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval == ECN_MODE_DEFAULT) {
				tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
				tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
			} else if (optval == ECN_MODE_ENABLE) {
				tp->ecn_flags |= TE_ECN_MODE_ENABLE;
				tp->ecn_flags &= ~TE_ECN_MODE_DISABLE;
			} else if (optval == ECN_MODE_DISABLE) {
				tp->ecn_flags &= ~TE_ECN_MODE_ENABLE;
				tp->ecn_flags |= TE_ECN_MODE_DISABLE;
			} else {
				error = EINVAL;
			}
			break;
		case TCP_NOTIFY_ACKNOWLEDGEMENT:
			error = sooptcopyin(sopt, &optval,
			    sizeof(optval), sizeof(optval));
			if (error) {
				break;
			}
			if (optval <= 0) {
				error = EINVAL;
				break;
			}
			if (tp->t_notify_ack_count >= TCP_MAX_NOTIFY_ACK) {
				error = ETOOMANYREFS;
				break;
			}

			/*
			 * validate that the given marker id is not
			 * a duplicate to avoid ambiguity
			 */
			if ((error = tcp_notify_ack_id_valid(tp, so,
			    optval)) != 0) {
				break;
			}
			error = tcp_add_notify_ack_marker(tp, optval);
			break;
		case SO_FLUSH:
			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}
			error = inp_flush(inp, optval);
			break;
		case SO_TRAFFIC_MGT_BACKGROUND:
			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}

			if (optval) {
				socket_set_traffic_mgt_flags_locked(so,
				    TRAFFIC_MGT_SO_BACKGROUND);
			} else {
				socket_clear_traffic_mgt_flags_locked(so,
				    TRAFFIC_MGT_SO_BACKGROUND);
			}
			break;
		case TCP_RXT_MINIMUM_TIMEOUT:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error) {
				break;
			}
			if (optval < 0) {
				error = EINVAL;
				break;
			}
			if (optval == 0) {
				tp->t_rxt_minimum_timeout = 0;
			} else {
				tp->t_rxt_minimum_timeout = min(optval,
				    TCP_RXT_MINIMUM_TIMEOUT_LIMIT);
				/* convert to milliseconds */
				tp->t_rxt_minimum_timeout *= TCP_RETRANSHZ;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
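	/*
	 * Illustrative sketch, not part of this file: the SOPT_SET cases
	 * above are reached from user space through setsockopt(2) at the
	 * IPPROTO_TCP level.  For example, the TCP_ENABLE_ECN case would
	 * serve a call such as the following (assuming a TCP socket
	 * descriptor fd):
	 *
	 *	int on = 1;
	 *	if (setsockopt(fd, IPPROTO_TCP, TCP_ENABLE_ECN,
	 *	    &on, sizeof(on)) == -1)
	 *		perror("setsockopt(TCP_ENABLE_ECN)");
	 */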
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_NODELAY:
			optval = tp->t_flags & TF_NODELAY;
			break;
		case TCP_MAXSEG:
			optval = tp->t_maxseg;
			break;
		case TCP_KEEPALIVE:
			if (tp->t_keepidle > 0) {
				optval = tp->t_keepidle / TCP_RETRANSHZ;
			} else {
				optval = tcp_keepidle / TCP_RETRANSHZ;
			}
			break;
		case TCP_KEEPINTVL:
			if (tp->t_keepintvl > 0) {
				optval = tp->t_keepintvl / TCP_RETRANSHZ;
			} else {
				optval = tcp_keepintvl / TCP_RETRANSHZ;
			}
			break;
		case TCP_KEEPCNT:
			if (tp->t_keepcnt > 0) {
				optval = tp->t_keepcnt;
			} else {
				optval = tcp_keepcnt;
			}
			break;
		case TCP_KEEPALIVE_OFFLOAD:
			optval = !!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD);
			break;
		case TCP_NOOPT:
			optval = tp->t_flags & TF_NOOPT;
			break;
		case TCP_NOPUSH:
			optval = tp->t_flags & TF_NOPUSH;
			break;
		case TCP_ENABLE_ECN:
			optval = (tp->ecn_flags & TE_ECN_MODE_ENABLE) ? 1 : 0;
			break;
		case TCP_ECN_MODE:
			if (tp->ecn_flags & TE_ECN_MODE_ENABLE) {
				optval = ECN_MODE_ENABLE;
			} else if (tp->ecn_flags & TE_ECN_MODE_DISABLE) {
				optval = ECN_MODE_DISABLE;
			} else {
				optval = ECN_MODE_DEFAULT;
			}
			break;
		case TCP_CONNECTIONTIMEOUT:
			optval = tp->t_keepinit / TCP_RETRANSHZ;
			break;
		case PERSIST_TIMEOUT:
			optval = tp->t_persist_timeout / TCP_RETRANSHZ;
			break;
		case TCP_RXT_CONNDROPTIME:
			optval = tp->t_rxt_conndroptime / TCP_RETRANSHZ;
			break;
		case TCP_RXT_FINDROP:
			optval = tp->t_flagsext & TF_RXTFINDROP;
			break;
		case TCP_NOTIMEWAIT:
			optval = (tp->t_flagsext & TF_NOTIMEWAIT) ? 1 : 0;
			break;
		case TCP_FASTOPEN:
			if (tp->t_state != TCPS_LISTEN ||
			    !(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
				error = ENOTSUP;
				break;
			}
			optval = tfo_enabled(tp);
			break;
		case TCP_FASTOPEN_FORCE_HEURISTICS:
			optval = (tp->t_flagsext & TF_FASTOPEN_HEUR) ? 1 : 0;
			break;
		case TCP_MEASURE_SND_BW:
			optval = tp->t_flagsext & TF_MEASURESNDBW;
			break;
		case TCP_INFO: {
			struct tcp_info ti;

			tcp_fill_info(tp, &ti);
			error = sooptcopyout(sopt, &ti, sizeof(struct tcp_info));
			goto done;
			/* NOT REACHED */
		}
		case TCP_CONNECTION_INFO: {
			struct tcp_connection_info tci;
			tcp_connection_fill_info(tp, &tci);
			error = sooptcopyout(sopt, &tci,
			    sizeof(struct tcp_connection_info));
			goto done;
		}
		case TCP_MEASURE_BW_BURST: {
			struct tcp_measure_bw_burst out = {};
			if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
			    tp->t_bwmeas == NULL) {
				error = EINVAL;
				break;
			}
			out.min_burst_size = tp->t_bwmeas->bw_minsizepkts;
			out.max_burst_size = tp->t_bwmeas->bw_maxsizepkts;
			error = sooptcopyout(sopt, &out, sizeof(out));
			goto done;
		}
		case TCP_NOTSENT_LOWAT:
			if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) {
				optval = tp->t_notsent_lowat;
			} else {
				optval = 0;
			}
			break;
		case TCP_ENABLE_MSGS:
			if (so->so_flags & SOF_ENABLE_MSGS) {
				optval = 1;
			} else {
				optval = 0;
			}
			break;
		case TCP_SENDMOREACKS:
			if (tp->t_flagsext & TF_NOSTRETCHACK) {
				optval = 1;
			} else {
				optval = 0;
			}
			break;
		case TCP_DISABLE_BLACKHOLE_DETECTION:
			if (tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) {
				optval = 1;
			} else {
				optval = 0;
			}
			break;
		case TCP_PEER_PID: {
			pid_t pid;
			error = tcp_lookup_peer_pid_locked(so, &pid);
			if (error == 0) {
				error = sooptcopyout(sopt, &pid, sizeof(pid));
			}
			goto done;
		}
		case TCP_ADAPTIVE_READ_TIMEOUT:
			optval = tp->t_adaptive_rtimo;
			break;
		case TCP_ADAPTIVE_WRITE_TIMEOUT:
			optval = tp->t_adaptive_wtimo;
			break;
		case SO_TRAFFIC_MGT_BACKGROUND:
			optval = (so->so_flags1 &
			    SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
			break;
		case TCP_NOTIFY_ACKNOWLEDGEMENT: {
			struct tcp_notify_ack_complete retid;

			if (sopt->sopt_valsize != sizeof(retid)) {
				error = EINVAL;
				break;
			}
			bzero(&retid, sizeof(retid));
			tcp_get_notify_ack_count(tp, &retid);
			if (retid.notify_complete_count > 0) {
				tcp_get_notify_ack_ids(tp, &retid);
			}

			error = sooptcopyout(sopt, &retid, sizeof(retid));
			goto done;
		}
		case TCP_RXT_MINIMUM_TIMEOUT:
			optval = tp->t_rxt_minimum_timeout / TCP_RETRANSHZ;
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0) {
			error = sooptcopyout(sopt, &optval, sizeof optval);
		}
		break;
	}
done:
	return error;
}
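/*
 * Illustrative sketch, not part of this file: the TCP_CONNECTION_INFO case
 * above is what backs a user-space query such as the following (assuming a
 * connected TCP socket descriptor fd; the field names are assumed from the
 * struct tcp_connection_info definition in <netinet/tcp.h>):
 *
 *	struct tcp_connection_info tci;
 *	socklen_t len = sizeof(tci);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CONNECTION_INFO, &tci, &len) == 0)
 *		printf("state %u, rtt %u ms\n", tci.tcpi_state, tci.tcpi_rttcur);
 */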
/*
 * tcp_sendspace and tcp_recvspace are the default send and receive window
 * sizes, respectively.  These are obsolescent (this information should
 * be set by the route).
 */
u_int32_t tcp_sendspace = 1448 * 256;
u_int32_t tcp_recvspace = 1448 * 384;

/* During attach, the size of socket buffer allocated is limited to
 * sb_max in sbreserve. Disallow setting the tcp send and recv space
 * to be more than sb_max because that will cause tcp_attach to fail
 * (see radar 5713060)
 */
static int
sysctl_tcp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
    int arg2, struct sysctl_req *req)
{
#pragma unused(arg2)
	u_int32_t new_value = 0, *space_p = NULL;
	int changed = 0, error = 0;
	u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES;

	switch (oidp->oid_number) {
	case TCPCTL_SENDSPACE:
		space_p = &tcp_sendspace;
		break;
	case TCPCTL_RECVSPACE:
		space_p = &tcp_recvspace;
		break;
	default:
		return EINVAL;
	}
	error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
	    &new_value, &changed);
	if (changed) {
		if (new_value > 0 && new_value <= sb_effective_max) {
			*space_p = new_value;
			SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, new_value);
		} else {
			error = ERANGE;
		}
	}
	return error;
}
#if SYSCTL_SKMEM
SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_sendspace,
    offsetof(skmem_sysctl, tcp.sendspace), sysctl_tcp_sospace,
    "IU", "Maximum outgoing TCP datagram size");
SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_recvspace,
    offsetof(skmem_sysctl, tcp.recvspace), sysctl_tcp_sospace,
    "IU", "Maximum incoming TCP datagram size");
#else /* SYSCTL_SKMEM */
SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_sendspace, 0, &sysctl_tcp_sospace, "IU", "Maximum outgoing TCP datagram size");
SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_recvspace, 0, &sysctl_tcp_sospace, "IU", "Maximum incoming TCP datagram size");
#endif /* SYSCTL_SKMEM */
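/*
 * Illustrative sketch, not part of this file: the knobs registered above show
 * up in user space as net.inet.tcp.sendspace and net.inet.tcp.recvspace and
 * can be read or tuned with sysctlbyname(3), e.g.:
 *
 *	u_int32_t rcv = 0;
 *	size_t len = sizeof(rcv);
 *
 *	if (sysctlbyname("net.inet.tcp.recvspace", &rcv, &len, NULL, 0) == 0)
 *		printf("default receive space: %u bytes\n", rcv);
 *
 * Writes above the sb_effective_max value computed in sysctl_tcp_sospace()
 * are rejected with ERANGE, so that a later tcp_attach()/soreserve() cannot
 * fail against sb_max (kern.ipc.maxsockbuf).
 */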
/*
 * Attach TCP protocol to socket, allocating
 * internet protocol control block, tcp control block,
 * buffer space, and entering LISTEN state if to accept connections.
 *
 * Returns:	0			Success
 *		in_pcballoc:ENOBUFS
 *		in_pcballoc:ENOMEM
 *		in_pcballoc:???		[IPSEC specific]
 */
static int
tcp_attach(struct socket *so, struct proc *p)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	int error;
	int isipv6 = SOCK_CHECK_DOM(so, PF_INET6) != 0;

	error = in_pcballoc(so, &tcbinfo, p);
	if (error) {
		return error;
	}

	inp = sotoinpcb(so);

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = soreserve(so, tcp_sendspace, tcp_recvspace);
		if (error) {
			return error;
		}
	}

	if (so->so_snd.sb_preconn_hiwat == 0) {
		soreserve_preconnect(so, 2048);
	}

	if ((so->so_rcv.sb_flags & SB_USRSIZE) == 0) {
		so->so_rcv.sb_flags |= SB_AUTOSIZE;
	}
	if ((so->so_snd.sb_flags & SB_USRSIZE) == 0) {
		so->so_snd.sb_flags |= SB_AUTOSIZE;
	}

	if (isipv6) {
		inp->inp_vflag |= INP_IPV6;
		inp->in6p_hops = -1;	/* use kernel default */
	} else {
		inp->inp_vflag |= INP_IPV4;
	}
	tp = tcp_newtcpcb(inp);
	if (tp == NULL) {
		int nofd = so->so_state & SS_NOFDREF;	/* XXX */

		so->so_state &= ~SS_NOFDREF;	/* don't free the socket yet */
		if (isipv6) {
			in6_pcbdetach(inp);
		} else {
			in_pcbdetach(inp);
		}
		so->so_state |= nofd;
		return ENOBUFS;
	}
	if (nstat_collect) {
		nstat_tcp_new_pcb(inp);
	}
	tp->t_state = TCPS_CLOSED;
	return 0;
}
/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static struct tcpcb *
tcp_disconnect(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (so->so_rcv.sb_cc != 0 || tp->t_reassqlen != 0) {
		return tcp_drop(tp, 0);
	}

	if (tp->t_state < TCPS_ESTABLISHED) {
		tp = tcp_close(tp);
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		tp = tcp_drop(tp, 0);
	} else {
		soisdisconnecting(so);
		sbflush(&so->so_rcv);
		tp = tcp_usrclosed(tp);

		/* A reset has been sent but socket exists, do not send FIN */
		if ((so->so_flags & SOF_MP_SUBFLOW) &&
		    (tp) && (tp->t_mpflags & TMPF_RESET)) {
			return tp;
		}

		if (tp) {
			(void) tcp_output(tp);
		}
	}
	return tp;
}
/*
 * User issued close, and wish to trail through shutdown states:
 * if never received SYN, just forget it.  If got a SYN from peer,
 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
 * If already got a FIN from peer, then almost done; go to LAST_ACK
 * state.  In all other cases, have already sent FIN to peer (e.g.
 * after PRU_SHUTDOWN), and just have to play tedious game waiting
 * for peer to send FIN or not respond to keep-alives, etc.
 * We can let the user exit from the close as soon as the FIN is acked.
 */
static struct tcpcb *
tcp_usrclosed(struct tcpcb *tp)
{
	switch (tp->t_state) {
	case TCPS_CLOSED:
	case TCPS_LISTEN:
	case TCPS_SYN_SENT:
		tp->t_state = TCPS_CLOSED;
		tp = tcp_close(tp);
		break;

	case TCPS_SYN_RECEIVED:
		tp->t_flags |= TF_NEEDFIN;
		break;

	case TCPS_ESTABLISHED:
		DTRACE_TCP4(state__change, void, NULL,
		    struct inpcb *, tp->t_inpcb,
		    struct tcpcb *, tp,
		    int32_t, TCPS_FIN_WAIT_1);
		tp->t_state = TCPS_FIN_WAIT_1;
		break;

	case TCPS_CLOSE_WAIT:
		DTRACE_TCP4(state__change, void, NULL,
		    struct inpcb *, tp->t_inpcb,
		    struct tcpcb *, tp,
		    int32_t, TCPS_LAST_ACK);
		tp->t_state = TCPS_LAST_ACK;
		break;
	}
	if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
		soisdisconnected(tp->t_inpcb->inp_socket);
		/* To prevent the connection hanging in FIN_WAIT_2 forever. */
		if (tp->t_state == TCPS_FIN_WAIT_2) {
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    TCP_CONN_MAXIDLE(tp));
		}
	}
	return tp;
}
void
tcp_in_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_rcv_swcsum++;
	tcpstat.tcps_rcv_swcsum_bytes += len;
}

void
tcp_out_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_snd_swcsum++;
	tcpstat.tcps_snd_swcsum_bytes += len;
}

void
tcp_in6_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_rcv6_swcsum++;
	tcpstat.tcps_rcv6_swcsum_bytes += len;
}

void
tcp_out6_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_snd6_swcsum++;
	tcpstat.tcps_snd6_swcsum_bytes += len;
}
/*
 * When messages are enabled on a TCP socket, the message priority
 * is sent as a control message. This function will extract it.
 */
static int
tcp_get_msg_priority(struct mbuf *control, uint32_t *msgpri)
{
	struct cmsghdr *cm;

	if (control == NULL) {
		return EINVAL;
	}

	for (cm = M_FIRST_CMSGHDR(control);
	    is_cmsg_valid(control, cm);
	    cm = M_NXT_CMSGHDR(control, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_MSG_PRIORITY) {
			if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
				return EINVAL;
			}
			*msgpri = *(uint32_t *)(void *)CMSG_DATA(cm);
			if (*msgpri < MSG_PRI_MIN || *msgpri > MSG_PRI_MAX) {