/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/route.h>
#include <net/ntstat.h>
#include <net/if_var.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cc.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#include <netinet/tcp_log.h>
#include <sys/kdebug.h>
#include <mach/sdt.h>
#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */
#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* MAC_SOCKET */
#include <netinet/lro_ext.h>
#if MPTCP
#include <netinet/mptcp_var.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_opt.h>
#endif /* MPTCP */

#include <corecrypto/ccaes.h>

#define	DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 1)
#define	DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 3)
#define	DBG_FNC_TCP_OUTPUT	NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
SYSCTL_SKMEM_TCP_INT(OID_AUTO, path_mtu_discovery,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1,
	"Enable Path MTU Discovery");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowstart_flightsize,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz, 1,
	"Slow start flight size");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, local_slowstart_flightsize,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8,
	"Slow start flight size for local networks");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_do_tso, 0, "Enable TCP Segmentation Offload");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_setup_percentage,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ecn_setup_percentage, 100,
	"Max ECN setup percentage");
static int
sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i, err = 0, changed = 0;
	struct ifnet *ifp;

	err = sysctl_io_number(req, tcp_ecn_outbound, sizeof(int32_t),
	    &i, &changed);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return err;

	if (changed) {
		if ((tcp_ecn_outbound == 0 || tcp_ecn_outbound == 1) &&
		    (i == 0 || i == 1)) {
			tcp_ecn_outbound = i;
			SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out,
			    tcp_ecn_outbound);
			return err;
		}
		if (tcp_ecn_outbound == 2 && (i == 0 || i == 1)) {
			/*
			 * Reset ECN enable flags on non-cellular
			 * interfaces so that the system default will take
			 * over
			 */
			ifnet_head_lock_shared();
			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
				if (!IFNET_IS_CELLULAR(ifp)) {
					ifnet_lock_exclusive(ifp);
					ifp->if_eflags &= ~IFEF_ECN_DISABLE;
					ifp->if_eflags &= ~IFEF_ECN_ENABLE;
					ifnet_lock_done(ifp);
				}
			}
			ifnet_head_done();
		} else {
			/*
			 * Set ECN enable flags on non-cellular
			 * interfaces
			 */
			ifnet_head_lock_shared();
			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
				if (!IFNET_IS_CELLULAR(ifp)) {
					ifnet_lock_exclusive(ifp);
					ifp->if_eflags |= IFEF_ECN_ENABLE;
					ifp->if_eflags &= ~IFEF_ECN_DISABLE;
					ifnet_lock_done(ifp);
				}
			}
			ifnet_head_done();
		}
		tcp_ecn_outbound = i;
		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_initiate_out,
		    tcp_ecn_outbound);
	}
	/* Change the other one too as the work is done */
	if (i == 2 || tcp_ecn_inbound == 2) {
		tcp_ecn_inbound = i;
		SYSCTL_SKMEM_UPDATE_FIELD(tcp.ecn_negotiate_in,
		    tcp_ecn_inbound);
	}

	return err;
}
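
/*
 * Illustrative note on the handler above (not in the original source):
 * the two ECN knobs take three values: 0 (off), 1 (on) and 2 (system
 * default, i.e. defer to the per-interface IFEF_ECN_ENABLE /
 * IFEF_ECN_DISABLE flags). From user space (assuming the stock
 * sysctl(8) utility):
 *
 *	sysctl net.inet.tcp.ecn_initiate_out=1
 *
 * runs sysctl_change_ecn_setting() with i == 1, sets IFEF_ECN_ENABLE
 * on every non-cellular interface, and also moves
 * net.inet.tcp.ecn_negotiate_in out of its "system default" state if
 * that knob was still 2.
 */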
int tcp_ecn_outbound = 2;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_initiate_out,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_outbound, 0,
	sysctl_change_ecn_setting, "IU",
	"Initiate ECN for outbound connections");

int tcp_ecn_inbound = 2;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_negotiate_in,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_inbound, 0,
	sysctl_change_ecn_setting, "IU",
	"Initiate ECN for inbound connections");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, packetchain,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50,
	"Enable TCP output packet chaining");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, socket_unlocked_on_output,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1,
	"Unlock TCP when sending packets down to IP");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3390,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3390, 1,
	"Calculate initial slowstart cwnd depending on MSS");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, min_iaj_win,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN,
	"Minimum recv win based on inter-packet arrival jitter");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, acc_iaj_react_limit,
	CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit,
	ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautosndbuf,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_do_autosendbuf, 1,
	"Enable send socket buffer auto-tuning");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufinc,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc,
	8 * 1024, "Increment in send socket buffer size");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufmax,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 512 * 1024,
	"Maximum send socket buffer size");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_prioritize,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_prioritize_acks, 1,
	"Prioritize pure acks");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_recvbg,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1,
	"Use RTT for bg recv algorithm");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_throttle_minwin,
	CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin,
	16 * 1024, "Minimum recv win for throttling");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, enable_tlp,
	CTLFLAG_RW | CTLFLAG_LOCKED,
	int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe");

static int32_t packchain_newlist = 0;
static int32_t packchain_looped = 0;
static int32_t packchain_sent = 0;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif /* IPSEC */

extern int slowlink_wsize;	/* window correction for slow links */
#if IPFIREWALL
extern int fw_enable;		/* firewall check for packet chaining */
extern int fw_bypass;		/* firewall check: disable packet chaining if there are rules */
#endif /* IPFIREWALL */
extern u_int32_t dlil_filter_disable_tso_count;
extern u_int32_t kipf_count;

static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *,
    int, struct mbuf *, int, int, boolean_t);
static struct mbuf *tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m,
    struct tcphdr *th);
static int tcp_recv_throttle(struct tcpcb *tp);
static int32_t
tcp_tfo_check(struct tcpcb *tp, int32_t len)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	unsigned int optlen = 0;
	unsigned int cookie_len;

	if (tp->t_flags & TF_NOOPT)
		goto fallback;

	if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
	    !tcp_heuristic_do_tfo(tp)) {
		tp->t_tfo_stats |= TFO_S_HEURISTICS_DISABLE;
		tcpstat.tcps_tfo_heuristics_disable++;
		goto fallback;
	}

	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED)
		return len;

	optlen += TCPOLEN_MAXSEG;

	if (tp->t_flags & TF_REQ_SCALE)
		optlen += 4;

#if MPTCP
	if ((so->so_flags & SOF_MP_SUBFLOW) && mptcp_enable &&
	    (tp->t_rxtshift <= mptcp_mpcap_retries ||
	    (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)))
		optlen += sizeof(struct mptcp_mpcapable_opt_common) +
		    sizeof(mptcp_key_t);
#endif /* MPTCP */

	if (tp->t_flags & TF_REQ_TSTMP)
		optlen += TCPOLEN_TSTAMP_APPA;

	if (SACK_ENABLED(tp))
		optlen += TCPOLEN_SACK_PERMITTED;

	/* Now, decide whether to use TFO or not */

	/* Don't even bother trying if there is no space at all... */
	if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ)
		goto fallback;

	cookie_len = tcp_cache_get_cookie_len(tp);
	if (cookie_len == 0) {
		/* No cookie, so we request one */
		return 0;
	}

	/* There is not enough space for the cookie, so we cannot do TFO */
	if (MAX_TCPOPTLEN - optlen < cookie_len)
		goto fallback;

	/* Do not send SYN+data if there is more in the queue than MSS */
	if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN))
		goto fallback;

	/* Ok, everything looks good. We can go on and do TFO */
	return len;

fallback:
	tcp_disable_tfo(tp);
	return 0;
}
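
/*
 * Worked example of the option-space accounting above (illustrative
 * sketch; the exact layout depends on which options are negotiated).
 * MAX_TCPOPTLEN is 40 bytes, and a typical SYN carries:
 *
 *	MSS			 4 bytes (TCPOLEN_MAXSEG)
 *	window scale		 4 bytes (incl. NOP padding)
 *	timestamps		12 bytes (TCPOLEN_TSTAMP_APPA)
 *	SACK permitted		 2 bytes (TCPOLEN_SACK_PERMITTED)
 *				--------
 *				22 bytes
 *
 * leaving 18 bytes: enough for the 2-byte Fast Open header plus a
 * cookie of up to 16 bytes, so the default-length cookie fits easily.
 * An MPTCP MP_CAPABLE option on the same SYN shrinks that budget
 * further, which is why it is added to optlen above.
 */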
/* Returns the number of bytes written to the TCP option-space */
static unsigned
tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned optlen, u_char *opt)
{
	u_char out[CCAES_BLOCK_SIZE];
	unsigned ret = 0;
	u_char *bp;

	if ((MAX_TCPOPTLEN - optlen) <
	    (TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT))
		return ret;

	tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));

	bp = opt + optlen;
	*bp++ = TCPOPT_FASTOPEN;
	*bp++ = 2 + TFO_COOKIE_LEN_DEFAULT;
	memcpy(bp, out, TFO_COOKIE_LEN_DEFAULT);
	ret += 2 + TFO_COOKIE_LEN_DEFAULT;

	tp->t_tfo_stats |= TFO_S_COOKIE_SENT;
	tcpstat.tcps_tfo_cookie_sent++;

	return ret;
}
static unsigned
tcp_tfo_write_cookie(struct tcpcb *tp, unsigned optlen, int32_t len,
    u_char *opt)
{
	u_int8_t tfo_len = MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ;
	struct socket *so = tp->t_inpcb->inp_socket;
	unsigned ret = 0;
	int res;
	u_char *bp;

	if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
		/* If there is some data, let's track it */
		if (len > 0) {
			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
			tcpstat.tcps_tfo_syn_data_sent++;
		}
		return 0;
	}

	bp = opt + optlen;

	/*
	 * The cookie will be copied in the appropriate place within the
	 * TCP-option space. That way we avoid the need for an intermediate
	 * variable.
	 */
	res = tcp_cache_get_cookie(tp, bp + TCPOLEN_FASTOPEN_REQ, &tfo_len);
	if (res == 0) {
		/* No cookie in the cache, so request one */
		*bp++ = TCPOPT_FASTOPEN;
		*bp++ = TCPOLEN_FASTOPEN_REQ;
		ret += TCPOLEN_FASTOPEN_REQ;

		tp->t_tfo_flags |= TFO_F_COOKIE_REQ;

		tp->t_tfo_stats |= TFO_S_COOKIE_REQ;
		tcpstat.tcps_tfo_cookie_req++;
	} else {
		*bp++ = TCPOPT_FASTOPEN;
		*bp++ = TCPOLEN_FASTOPEN_REQ + tfo_len;
		ret += TCPOLEN_FASTOPEN_REQ + tfo_len;

		tp->t_tfo_flags |= TFO_F_COOKIE_SENT;

		/* If there is some data, let's track it */
		if (len > 0) {
			tp->t_tfo_stats |= TFO_S_SYN_DATA_SENT;
			tcpstat.tcps_tfo_syn_data_sent++;
		}
	}

	return ret;
}
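
/*
 * On-wire layout written by the two helpers above (illustrative):
 *
 *	+--------+--------+----------------------------+
 *	|  Kind  | Length | Cookie (0 or 4..16 bytes)  |
 *	|   34   | 2+len  |                            |
 *	+--------+--------+----------------------------+
 *
 * TCPOPT_FASTOPEN is option kind 34 (RFC 7413). A length of exactly
 * TCPOLEN_FASTOPEN_REQ (2) with no cookie body is a cookie request; a
 * longer option carries the cookie previously learned via
 * tcp_cache_get_cookie().
 */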
static inline bool
tcp_send_ecn_flags_on_syn(struct tcpcb *tp, struct socket *so)
{
	return !((tp->ecn_flags & TE_SETUPSENT) ||
	    (so->so_flags & SOF_MP_SUBFLOW) ||
	    tfo_enabled(tp));
}
void
tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp)
{
	boolean_t inbound;

	/*
	 * Socket option has precedence
	 */
	if (tp->ecn_flags & TE_ECN_MODE_ENABLE) {
		tp->ecn_flags |= TE_ENABLE_ECN;
		goto check_heuristic;
	}

	if (tp->ecn_flags & TE_ECN_MODE_DISABLE) {
		tp->ecn_flags &= ~TE_ENABLE_ECN;
		return;
	}
	/*
	 * Per interface setting comes next
	 */
	if (ifp != NULL) {
		if (ifp->if_eflags & IFEF_ECN_ENABLE) {
			tp->ecn_flags |= TE_ENABLE_ECN;
			goto check_heuristic;
		}

		if (ifp->if_eflags & IFEF_ECN_DISABLE) {
			tp->ecn_flags &= ~TE_ENABLE_ECN;
			return;
		}
	}
	/*
	 * System wide settings come last
	 */
	inbound = (tp->t_inpcb->inp_socket->so_head != NULL);
	if ((inbound && tcp_ecn_inbound == 1) ||
	    (!inbound && tcp_ecn_outbound == 1)) {
		tp->ecn_flags |= TE_ENABLE_ECN;
		goto check_heuristic;
	} else {
		tp->ecn_flags &= ~TE_ENABLE_ECN;
	}

	return;

check_heuristic:
	if (!tcp_heuristic_do_ecn(tp))
		tp->ecn_flags &= ~TE_ENABLE_ECN;

	/*
	 * If the interface setting, system-level setting and heuristics
	 * allow to enable ECN, randomly select a percentage of
	 * connections (tcp_ecn_setup_percentage) to enable it
	 */
	if ((tp->ecn_flags & (TE_ECN_MODE_ENABLE | TE_ECN_MODE_DISABLE
	    | TE_ENABLE_ECN)) == TE_ENABLE_ECN) {
		/*
		 * Use the random value in iss for randomizing
		 * this selection
		 */
		if ((tp->iss % 100) >= tcp_ecn_setup_percentage)
			tp->ecn_flags &= ~TE_ENABLE_ECN;
	}
}
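
/*
 * Illustrative sketch of the percentage gate above: tp->iss is already
 * random, so (tp->iss % 100) is close to uniform in [0, 99]. With the
 * default tcp_ecn_setup_percentage of 100 the comparison never fires
 * and every eligible connection keeps TE_ENABLE_ECN; setting the
 * sysctl to e.g. 5 would keep ECN on only for connections whose
 * (iss % 100) lands in 0..4.
 */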
/*
 * Tcp output routine: figure out what should be sent and send it.
 *
 * Returns:	0	Success
 *	ip_output_list:ENOMEM
 *	ip_output_list:EADDRNOTAVAIL
 *	ip_output_list:ENETUNREACH
 *	ip_output_list:EHOSTUNREACH
 *	ip_output_list:EACCES
 *	ip_output_list:EMSGSIZE
 *	ip_output_list:ENOBUFS
 *	ip_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
 *	ip6_output_list:EINVAL
 *	ip6_output_list:EOPNOTSUPP
 *	ip6_output_list:EHOSTUNREACH
 *	ip6_output_list:EADDRNOTAVAIL
 *	ip6_output_list:ENETUNREACH
 *	ip6_output_list:EMSGSIZE
 *	ip6_output_list:ENOBUFS
 *	ip6_output_list:???		[ignorable: mostly IPSEC/firewall/DLIL]
 */
int
tcp_output(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	int32_t len, recwin, sendwin, off;
	int flags, error = 0;
	struct mbuf *m;
	struct ip *ip = NULL;
	struct ipovly *ipov = NULL;
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif /* INET6 */
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
	int idle, sendalot, lost = 0;
	int i, sack_rxmit;
	int tso = 0;
	int sack_bytes_rxmt;
	tcp_seq old_snd_nxt = 0;
	struct sackhole *p;
#if IPSEC
	unsigned ipsec_optlen = 0;
#endif /* IPSEC */
	int idle_time = 0;
	struct mbuf *packetlist = NULL;
	struct mbuf *tp_inp_options = inp->inp_depend4.inp4_options;
#if INET6
	int isipv6 = inp->inp_vflag & INP_IPV6;
#endif
	short packchain_listadd = 0;
	int so_options = so->so_options;
	struct rtentry *rt;
	struct ifnet *ifp = NULL;
	u_int32_t svc_flags = 0, allocated_len;
	u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0;
	struct mbuf *mnext = NULL;
	int sackoptlen = 0;
#if MPTCP
	boolean_t mptcp_acknow;
#endif /* MPTCP */
	boolean_t cell = FALSE;
	boolean_t wifi = FALSE;
	boolean_t wired = FALSE;
	boolean_t sack_rescue_rxt = FALSE;
	int sotc = so->so_traffic_class;
	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);

	/*
	 * Since idle_time is a signed integer, the following integer
	 * subtraction takes care of wrap around of tcp_now.
	 */
	idle_time = tcp_now - tp->t_rcvtime;
	if (idle && idle_time >= TCP_IDLETIMEOUT(tp)) {
		if (CC_ALGO(tp)->after_idle != NULL &&
		    (tp->tcp_cc_index != TCP_CC_ALGO_CUBIC_INDEX ||
		    idle_time >= TCP_CC_CWND_NONVALIDATED_PERIOD)) {
			CC_ALGO(tp)->after_idle(tp);
			tcp_ccdbg_trace(tp, NULL, TCP_CC_IDLE_TIMEOUT);
		}

		/*
		 * Do some other tasks that need to be done after
		 * idle time
		 */
		if (!SLIST_EMPTY(&tp->t_rxt_segments))
			tcp_rxtseg_clean(tp);

		/* If stretch ack was auto-disabled, re-evaluate it */
		tcp_cc_after_idle_stretchack(tp);
	}
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
#if MPTCP
	if (tp->t_mpflags & TMPF_RESET) {
		tcp_check_timer_state(tp);
		/*
		 * Once a RST has been sent for an MPTCP subflow,
		 * the subflow socket stays around until deleted.
		 * No packets such as FINs must be sent after RST.
		 */
		return 0;
	}
#endif /* MPTCP */

again:
#if MPTCP
	mptcp_acknow = FALSE;
#endif
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

#if INET6
	if (isipv6) {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
		    sendalot, 0, 0);
	} else
#endif
	{
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
		    (inp->inp_faddr.s_addr & 0xffff)),
		    sendalot, 0, 0);
	}
	/*
	 * If the route generation id changed, we need to check that our
	 * local (source) IP address is still valid. If it isn't, either
	 * return an error or silently do nothing (assuming the address will
	 * come back before the TCP connection times out).
	 */
	rt = inp->inp_route.ro_rt;
	if (rt != NULL && ROUTE_UNUSABLE(&tp->t_inpcb->inp_route)) {
		struct in_ifaddr *ia = NULL;
		struct in6_ifaddr *ia6 = NULL;
		int found_srcaddr = 0;

		/* disable multipages at the socket */
		somultipages(so, FALSE);

		/* Disable TSO for the socket until we know more */
		tp->t_flags &= ~TF_TSO;

#if INET6
		if (isipv6) {
			ia6 = ifa_foraddr6(&inp->in6p_laddr);
			if (ia6 != NULL)
				found_srcaddr = 1;
		} else
#endif /* INET6 */
		{
			ia = ifa_foraddr(inp->inp_laddr.s_addr);
			if (ia != NULL)
				found_srcaddr = 1;
		}

		/* check that the source address is still valid */
		if (found_srcaddr == 0) {
			soevent(so,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR));

			if (tp->t_state >= TCPS_CLOSE_WAIT) {
				tcp_drop(tp, EADDRNOTAVAIL);
				return EADDRNOTAVAIL;
			}

			/*
			 * Set retransmit timer if it wasn't set,
			 * reset Persist timer and shift register as the
			 * advertised peer window may not be valid anymore
			 */
			if (tp->t_timer[TCPT_REXMT] == 0) {
				tp->t_timer[TCPT_REXMT] =
				    OFFSET_FROM_START(tp, tp->t_rxtcur);
				if (tp->t_timer[TCPT_PERSIST] != 0) {
					tp->t_timer[TCPT_PERSIST] = 0;
					tp->t_persist_stop = 0;
					TCP_RESET_REXMT_STATE(tp);
				}
			}

			if (tp->t_pktlist_head != NULL)
				m_freem_list(tp->t_pktlist_head);
			TCP_PKTLIST_CLEAR(tp);

			/* drop connection if source address isn't available */
			if (so->so_flags & SOF_NOADDRAVAIL) {
				tcp_drop(tp, EADDRNOTAVAIL);
				return EADDRNOTAVAIL;
			} else {
				tcp_check_timer_state(tp);
				/*
				 * silently ignore, keep data in socket:
				 * address may be back
				 */
				return 0;
			}
		}
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);

		if (ia6 != NULL)
			IFA_REMREF(&ia6->ia_ifa);

		/*
		 * Address is still valid; check for multipages capability
		 * again in case the outgoing interface has changed.
		 */
		RT_LOCK(rt);
		if ((ifp = rt->rt_ifp) != NULL) {
			somultipages(so, (ifp->if_hwassist & IFNET_MULTIPAGES));
			tcp_set_tso(tp, ifp);
			soif2kcl(so, (ifp->if_eflags & IFEF_2KCL));
			tcp_set_ecn(tp, ifp);
		}
		if (rt->rt_flags & RTF_UP)
			RT_GENID_SYNC(rt);
		/*
		 * See if we should do MTU discovery. Don't do it if:
		 *	1) it is disabled via the sysctl
		 *	2) the route isn't up
		 *	3) the MTU is locked (if it is, then discovery
		 *	   has been disabled)
		 */
		if (!path_mtu_discovery || ((rt != NULL) &&
		    (!(rt->rt_flags & RTF_UP) ||
		    (rt->rt_rmx.rmx_locks & RTV_MTU))))
			tp->t_flags &= ~TF_PMTUD;
		else
			tp->t_flags |= TF_PMTUD;

		RT_UNLOCK(rt);
	}

	if (rt != NULL) {
		cell = IFNET_IS_CELLULAR(rt->rt_ifp);
		wifi = (!cell && IFNET_IS_WIFI(rt->rt_ifp));
		wired = (!wifi && IFNET_IS_WIRED(rt->rt_ifp));
	}
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
		sendwin = min(sendwin, slowlink_wsize);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly
	 * trying to send out new data (when sendalot is 1), bypass this
	 * function. If we retransmit in fast recovery mode, decrement
	 * snd_cwnd, since we're replacing a (future) new transmission
	 * with a retransmission now, and we previously incremented
	 * snd_cwnd in tcp_input().
	 */
	/*
	 * Still in sack recovery, reset rxmit flag to zero.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		int32_t cwin;

		cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
		if (cwin < 0)
			cwin = 0;
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover. Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else {
				/* Can rexmit part of the current hole */
				len = ((int32_t)min(cwin,
				    tp->snd_recover - p->rxmit));
			}
		} else {
			len = ((int32_t)min(cwin, p->end - p->rxmit));
		}
		if (len > 0) {
			off = p->rxmit - tp->snd_una;
			sack_rxmit = 1;
			sendalot = 1;
			tcpstat.tcps_sack_rexmits++;
			tcpstat.tcps_sack_rexmit_bytes +=
			    min(len, tp->t_maxseg);
		} else {
			len = 0;
		}
	}
after_sack_rexmit:
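
	/*
	 * Illustrative example of the SACK-retransmit window math above
	 * (hypothetical numbers): with snd_wnd = snd_cwnd = 10 * t_maxseg
	 * and sack_bytes_rxmt = 4 * t_maxseg already in flight from the
	 * scoreboard, cwin is 6 * t_maxseg; the hole is then retransmitted
	 * at most min(cwin, p->end - p->rxmit) bytes at a time, and never
	 * past snd_recover.
	 */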
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flagsext & TF_FORCE) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.sb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tp->t_timer[TCPT_PERSIST] = 0;
			tp->t_persist_stop = 0;
			TCP_RESET_REXMT_STATE(tp);
		}
	}
	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0) {
			len = min(so->so_snd.sb_cc, sendwin) - off;
		} else {
			int32_t cwin;

			cwin = tp->snd_cwnd -
			    (tp->snd_nxt - tp->sack_newdata) -
			    sack_bytes_rxmt;
			if (cwin < 0)
				cwin = 0;
			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = min(so->so_snd.sb_cc, tp->snd_wnd)
			    - off;
			/*
			 * Don't remove this (len > 0) check !
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above. Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				len = imin(len, cwin);
			} else {
				len = 0;
			}
			/*
			 * At this point SACK recovery can not send any
			 * data from scoreboard or any new data. Check
			 * if we can do a rescue retransmit towards the
			 * tail end of recovery window.
			 */
			if (len == 0 && cwin > 0 &&
			    SEQ_LT(tp->snd_fack, tp->snd_recover) &&
			    !(tp->t_flagsext & TF_RESCUE_RXT)) {
				len = min((tp->snd_recover - tp->snd_fack),
				    tp->t_maxseg);
				len = imin(len, cwin);
				old_snd_nxt = tp->snd_nxt;
				sack_rescue_rxt = TRUE;
				tp->snd_nxt = tp->snd_recover - len;
				/*
				 * If FIN has been sent, snd_max
				 * must have been advanced to cover it.
				 */
				if ((tp->t_flags & TF_SENTFIN) &&
				    tp->snd_max == tp->snd_recover)
					tp->snd_nxt--;

				off = tp->snd_nxt - tp->snd_una;
				sendalot = 0;
				tp->t_flagsext |= TF_RESCUE_RXT;
			}
		}
	}
	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED || tfo_enabled(tp))
			flags &= ~TH_SYN;
		off--;
		len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
			while (inp->inp_sndinprog_cnt == 0 &&
			    tp->t_pktlist_head != NULL) {
				packetlist = tp->t_pktlist_head;
				packchain_listadd = tp->t_lastchain;
				packchain_sent++;
				TCP_PKTLIST_CLEAR(tp);

				error = tcp_ip_output(so, tp, packetlist,
				    packchain_listadd, tp_inp_options,
				    (so_options & SO_DONTROUTE),
				    (sack_rxmit || (sack_bytes_rxmt != 0)),
				    isipv6);
			}

			/*
			 * tcp was closed while we were in ip,
			 * resume close
			 */
			if (inp->inp_sndinprog_cnt == 0 &&
			    (tp->t_flags & TF_CLOSING)) {
				tp->t_flags &= ~TF_CLOSING;
				(void) tcp_close(tp);
			} else {
				tcp_check_timer_state(tp);
			}
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,
			    0, 0, 0, 0, 0);
			return 0;
		}
	}
	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 *
	 * In case of TFO, we handle the setting of the len in
	 * tcp_tfo_check. In case TFO is not enabled, never ever send
	 * SYN+data.
	 */
	if ((flags & TH_SYN) && !tfo_enabled(tp)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * Don't send a RST with data.
	 */
	if (flags & TH_RST)
		len = 0;

	if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp))
		len = tcp_tfo_check(tp, len);

	/*
	 * The check here used to be (len < 0). Sometimes len is zero
	 * when the congestion window is closed and we need to check
	 * if the persist timer has to be set in that case. But don't set
	 * persist until the connection is established.
	 */
	if (len <= 0 && !(flags & TH_SYN)) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 */
		len = 0;
		if (sendwin == 0) {
			tp->t_timer[TCPT_REXMT] = 0;
			tp->t_timer[TCPT_PTO] = 0;
			TCP_RESET_REXMT_STATE(tp);
			tp->snd_nxt = tp->snd_una;
			off = 0;
			if (tp->t_timer[TCPT_PERSIST] == 0)
				tcp_setpersist(tp);
		}
	}
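
	/*
	 * Illustrative sketch of the zero-window path above: once sendwin
	 * drops to 0 with unsent data queued, the retransmit and PTO
	 * timers are cleared, snd_nxt is pulled back to snd_una, and
	 * tcp_setpersist() arms TCPT_PERSIST. When that timer fires, the
	 * timer code calls back into tcp_output() with TF_FORCE set,
	 * which forces sendwin to 1 and emits a 1-byte window probe.
	 */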
	/*
	 * Automatic sizing of send socket buffer. Increase the send
	 * socket buffer size if all of the following criteria are met
	 *	1. the receiver has enough buffer space for this data
	 *	2. send buffer is filled to 7/8th with data (so we actually
	 *	   have data to make use of it);
	 *	3. our send window (slow start and congestion controlled) is
	 *	   larger than sent but unacknowledged data in send buffer.
	 */
	if (tcp_do_autosendbuf == 1 &&
	    !INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) &&
	    (so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
	    tcp_cansbgrow(&so->so_snd)) {
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
		    so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
		    sendwin >= (so->so_snd.sb_cc -
		    (tp->snd_nxt - tp->snd_una))) {
			if (sbreserve(&so->so_snd,
			    min(so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
			    tcp_autosndbuf_max)) == 1) {
				so->so_snd.sb_idealsize = so->so_snd.sb_hiwat;
			}
		}
	}
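
	/*
	 * Worked example of the auto-tuning test above (hypothetical
	 * numbers): with sb_hiwat = 128 KB, growth requires the peer's
	 * window to cover 4/5 of it (snd_wnd >= ~102 KB), the buffer to
	 * be at least 7/8 full (sb_cc >= 112 KB), and sendwin to exceed
	 * the unacknowledged backlog. Each hit then grows sb_hiwat by
	 * tcp_autosndbuf_inc (8 KB) up to tcp_autosndbuf_max (512 KB).
	 */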
	/*
	 * Truncate to the maximum segment length or enable TCP Segmentation
	 * Offloading (if supported by hardware) and ensure that FIN is removed
	 * if the length no longer contains the last data byte.
	 *
	 * TSO may only be used if we are in a pure bulk sending state.
	 * The presence of TCP-MD5, SACK retransmits, SACK advertisements,
	 * ipfw rules and IP options, as well as disabling hardware checksum
	 * offload prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * The length of TSO bursts is limited to TCP_MAXWIN.  That limit and
	 * removal of FIN (if not already caught here) are handled later after
	 * the exact length of the TCP options are known.
	 */
#if IPSEC
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
	if (ipsec_bypass == 0)
		ipsec_optlen = ipsec_hdrsiz_tcp(tp);
#endif
	if (len > tp->t_maxseg) {
		if ((tp->t_flags & TF_TSO) && tcp_do_tso && hwcksum_tx &&
		    ip_use_randomid && kipf_count == 0 &&
		    dlil_filter_disable_tso_count == 0 &&
		    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
		    sack_bytes_rxmt == 0 &&
		    inp->inp_options == NULL &&
		    inp->in6p_options == NULL
#if IPSEC
		    && ipsec_optlen == 0
#endif
#if IPFIREWALL
		    && (fw_enable == 0 || fw_bypass)
#endif
		    ) {
			tso = 1;
			sendalot = 0;
		} else {
			len = tp->t_maxseg;
			sendalot = 1;
			tso = 0;
		}
	}

	/* Send one segment or less as a tail loss probe */
	if (tp->t_flagsext & TF_SENT_TLPROBE) {
		len = min(len, tp->t_maxseg);
		sendalot = 0;
	}
#if MPTCP
	if (so->so_flags & SOF_MP_SUBFLOW && off < 0) {
		os_log_error(mptcp_log_handle, "%s - %lx: offset is negative! len %d off %d\n",
		    __func__, (unsigned long)VM_KERNEL_ADDRPERM(tp->t_mpsub->mpts_mpte),
		    len, off);
		len = 0;
		off = 0;
	}

	if ((so->so_flags & SOF_MP_SUBFLOW) &&
	    !(tp->t_mpflags & TMPF_TCP_FALLBACK)) {
		int32_t newlen = len;
		if (tp->t_state >= TCPS_ESTABLISHED &&
		    (tp->t_mpflags & TMPF_SND_MPPRIO ||
		    tp->t_mpflags & TMPF_SND_REM_ADDR ||
		    tp->t_mpflags & TMPF_SND_MPFAIL ||
		    tp->t_mpflags & TMPF_SND_KEYS ||
		    tp->t_mpflags & TMPF_SND_JACK)) {
			if (len > 0)
				len = 0;
			/*
			 * On a new subflow, don't try to send again, because
			 * we are still waiting for the fourth ack.
			 */
			if (!(tp->t_mpflags & TMPF_PREESTABLISHED))
				len = 0;
			mptcp_acknow = TRUE;
		} else {
			mptcp_acknow = FALSE;
		}
		/*
		 * The contiguous bytes in the subflow socket buffer can be
		 * discontiguous at the MPTCP level. Since only one DSS
		 * option can be sent in one packet, reduce length to match
		 * the contiguous MPTCP level. Set sendalot to send remainder.
		 */
		if (len > 0 && off >= 0) {
			newlen = mptcp_adj_sendlen(so, off);
			if (newlen < len) {
				len = newlen;
				sendalot = 1;
			}
		}
	}
#endif /* MPTCP */
	/*
	 * If the socket is capable of doing unordered send,
	 * pull the amount of data that can be sent from the
	 * unordered priority queues to the serial queue in
	 * the socket buffer. If bytes are not yet available
	 * in the highest priority message, we may not be able
	 * to send any new data.
	 */
	if (so->so_flags & SOF_ENABLE_MSGS) {
		if ((off + len) >
		    so->so_msg_state->msg_serial_bytes) {
			sbpull_unordered_data(so, off, len);

			/* check if len needs to be modified */
			if ((off + len) >
			    so->so_msg_state->msg_serial_bytes) {
				len = so->so_msg_state->msg_serial_bytes - off;
				if (len <= 0) {
					len = 0;
					tcpstat.tcps_msg_sndwaithipri++;
				}
			}
		}
	}

	if (sack_rxmit) {
		if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
			flags &= ~TH_FIN;
	} else {
		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
			flags &= ~TH_FIN;
	}
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 25% of the maximum possible
	 * window, then want to send a window update to peer.
	 */
	recwin = tcp_sbspace(tp);

	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
		if (recwin < (int32_t)(so->so_rcv.sb_hiwat / 4) &&
		    recwin < (int)tp->t_maxseg) {
			recwin = 0;
		}
	} else {
		struct mptcb *mp_tp = tptomptp(tp);
		struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);

		if (recwin < (int32_t)(mp_so->so_rcv.sb_hiwat / 4) &&
		    recwin < (int)tp->t_maxseg) {
			recwin = 0;
		}
	}

#if TRAFFIC_MGT
	if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
		if (recwin > 0 && tcp_recv_throttle(tp)) {
			uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg;
			uint32_t bg_rwintop = tp->rcv_adv;
			if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win))
				bg_rwintop = tp->rcv_nxt + min_iaj_win;
			recwin = imin((int32_t)(bg_rwintop - tp->rcv_nxt),
			    recwin);
			if (recwin < 0)
				recwin = 0;
		}
	}
#endif /* TRAFFIC_MGT */

	if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale))
		recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale);

	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
		if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) {
			recwin = (int32_t)(tp->rcv_adv - tp->rcv_nxt);
		}
	} else {
		struct mptcb *mp_tp = tptomptp(tp);

		/* Don't remove what we announced at the MPTCP-layer */
		if (recwin < (int32_t)(mp_tp->mpt_rcvadv -
		    (uint32_t)mp_tp->mpt_rcvnxt)) {
			recwin = (int32_t)(mp_tp->mpt_rcvadv -
			    (uint32_t)mp_tp->mpt_rcvnxt);
		}
	}
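
	/*
	 * Illustrative arithmetic for the clamps above: the advertised
	 * window field is 16 bits, scaled by rcv_scale, so the largest
	 * announceable window is TCP_MAXWIN << rcv_scale (e.g.
	 * 65535 << 3, roughly 512 KB, for rcv_scale = 3). recwin is
	 * clamped to that, and never below what was already advertised
	 * (rcv_adv - rcv_nxt), so the peer never sees the window shrink.
	 */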
	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- we've timed out (e.g. persist timer)
	 *	- we need to retransmit
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (the receiver may be limited by the window size)
	 */
	if (len) {
		if (tp->t_flagsext & TF_FORCE)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
			goto send;
		if (sack_rxmit)
			goto send;

		/*
		 * If this here is the first segment after SYN/ACK and TFO
		 * is being used, then we always send it, regardless of
		 * Nagle, ...
		 */
		if (tp->t_state == TCPS_SYN_RECEIVED &&
		    tfo_enabled(tp) &&
		    (tp->t_tfo_flags & TFO_F_COOKIE_VALID) &&
		    tp->snd_nxt == tp->iss + 1)
			goto send;

		/*
		 * Send new data on the connection only if it is
		 * not flow controlled
		 */
		if (!INP_WAIT_FOR_IF_FEEDBACK(inp) ||
		    tp->t_state != TCPS_ESTABLISHED) {
			if (len >= tp->t_maxseg)
				goto send;

			if (!(tp->t_flags & TF_MORETOCOME) &&
			    (idle || tp->t_flags & TF_NODELAY ||
			    (tp->t_flags & TF_MAXSEGSNT) ||
			    ALLOW_LIMITED_TRANSMIT(tp)) &&
			    (tp->t_flags & TF_NOPUSH) == 0 &&
			    (len + off >= so->so_snd.sb_cc ||
			    /*
			     * MPTCP needs to respect the DSS-mappings. So, it
			     * may be sending data that *could* have been
			     * coalesced, but cannot because of
			     * mptcp_adj_sendlen().
			     */
			    so->so_flags & SOF_MP_SUBFLOW))
				goto send;
			if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
				goto send;
		} else {
			tcpstat.tcps_fcholdpacket++;
		}
	}
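
	/*
	 * Illustrative summary of the transmit decision above: a
	 * full-sized segment always goes out; a partial segment goes out
	 * only if it empties the send buffer while we are idle or
	 * TF_NODELAY is set (the Nagle rule), if limited transmit
	 * applies, or if it covers at least half of the peer's largest
	 * advertised window. Otherwise the data waits for an ACK so it
	 * can coalesce with more data.
	 */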
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN)) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv, oldwin = 0;
		adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) -
		    (tp->rcv_adv - tp->rcv_nxt);

		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
			oldwin = tp->rcv_adv - tp->rcv_nxt;

		if (adv >= (int32_t) (2 * tp->t_maxseg)) {
			/*
			 * Update only if the resulting scaled value of
			 * the window changed, or if there is a change in
			 * the sequence since the last ack. This avoids
			 * what appears as dupe ACKS (see rdar://5640997)
			 *
			 * If streaming is detected avoid sending too many
			 * window updates. We will depend on the delack
			 * timer to send a window update when needed.
			 */
			if (!(tp->t_flags & TF_STRETCHACK) &&
			    (tp->last_ack_sent != tp->rcv_nxt ||
			    ((oldwin + adv) >> tp->rcv_scale) >
			    (oldwin >> tp->rcv_scale))) {
				goto send;
			}
		}
		if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat)
			goto send;

		/*
		 * Make sure that the delayed ack timer is set if
		 * we delayed sending a window update because of
		 * streaming detection.
		 */
		if ((tp->t_flags & TF_STRETCHACK) &&
		    !(tp->t_flags & TF_DELACK)) {
			tp->t_flags |= TF_DELACK;
			tp->t_timer[TCPT_DELACK] =
			    OFFSET_FROM_START(tp, tcp_delack);
		}
	}
	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
#if MPTCP
	if (mptcp_acknow)
		goto send;
#endif /* MPTCP */
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if ((flags & TH_FIN) &&
	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if (SACK_ENABLED(tp) && (tp->t_state >= TCPS_ESTABLISHED) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    tp->t_timer[TCPT_REXMT] == 0 &&
	    tp->t_timer[TCPT_PERSIST] == 0) {
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp,
		    tp->t_rxtcur);
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to insure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tp->t_timer[TCPT_PERSIST]
	 *	is set when we are in persist state.
	 * tp->t_flagsext & TF_FORCE
	 *	is set when we are called to send a persist packet.
	 * tp->t_timer[TCPT_REXMT]
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
	    tp->t_timer[TCPT_PERSIST] == 0) {
		TCP_RESET_REXMT_STATE(tp);
		tcp_setpersist(tp);
	}
just_return:
	/*
	 * If there is no reason to send a segment, just return.
	 * But if there are some packets left in the packet list,
	 * send them now.
	 */
	while (inp->inp_sndinprog_cnt == 0 &&
	    tp->t_pktlist_head != NULL) {
		packetlist = tp->t_pktlist_head;
		packchain_listadd = tp->t_lastchain;
		packchain_sent++;
		TCP_PKTLIST_CLEAR(tp);

		error = tcp_ip_output(so, tp, packetlist,
		    packchain_listadd,
		    tp_inp_options, (so_options & SO_DONTROUTE),
		    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);
	}
	/* tcp was closed while we were in ip; resume close */
	if (inp->inp_sndinprog_cnt == 0 &&
	    (tp->t_flags & TF_CLOSING)) {
		tp->t_flags &= ~TF_CLOSING;
		(void) tcp_close(tp);
	} else {
		tcp_check_timer_state(tp);
	}
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
	return 0;

send:
	/*
	 * Set TF_MAXSEGSNT flag if the segment size is greater than
	 * the max segment size.
	 */
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags |= TF_MAXSEGSNT;
		else
			tp->t_flags &= ~TF_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#if INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if ((tp->t_flags & TF_NOOPT) == 0) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			(void)memcpy(opt + 2, &mss, sizeof(mss));
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    ((flags & TH_ACK) == 0 ||
			    (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(void *)(opt + optlen)) = htonl(
				    TCPOPT_NOP << 24 |
				    TCPOPT_WINDOW << 16 |
				    TCPOLEN_WINDOW << 8 |
				    tp->request_r_scale);
				optlen += 4;
			}
#if MPTCP
			if (mptcp_enable && (so->so_flags & SOF_MP_SUBFLOW)) {
				optlen = mptcp_setup_syn_opts(so, opt, optlen);
			}
#endif /* MPTCP */
		}
	}
	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYN's.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (flags & TH_RST) == 0 &&
	    ((flags & TH_ACK) == 0 ||
	    (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(void *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(tcp_now);
		*lp = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}

	/* Note the timestamp for receive buffer autosizing */
	if (tp->rfbuf_ts == 0 && (so->so_rcv.sb_flags & SB_AUTOSIZE))
		tp->rfbuf_ts = tcp_now;
	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
		/*
		 * Tack on the SACK permitted option *last*.
		 * And do padding of options after tacking this on.
		 * This is because, if MSS, TS, WinScale and Signatures are
		 * all present, we have just 2 bytes left for the SACK
		 * permitted option, which is just enough.
		 */
		/*
		 * If this is the first SYN of connection (not a SYN
		 * ACK), include SACK permitted option.  If this is a
		 * SYN ACK, include SACK permitted option if peer has
		 * already done so. This is only for active connect,
		 * since the syncache takes care of the passive connect.
		 */
		if ((flags & TH_SYN) &&
		    (!(flags & TH_ACK) || (tp->t_flags & TF_SACK_PERMIT))) {
			u_char *bp;
			bp = (u_char *)opt + optlen;

			*bp++ = TCPOPT_SACK_PERMITTED;
			*bp++ = TCPOLEN_SACK_PERMITTED;
			optlen += TCPOLEN_SACK_PERMITTED;
		}
	}
->so_flags
& SOF_MP_SUBFLOW
) {
1584 * Its important to piggyback acks with data as ack only packets
1585 * may get lost and data packets that don't send Data ACKs
1586 * still advance the subflow level ACK and therefore make it
1587 * hard for the remote end to recover in low cwnd situations.
1590 tp
->t_mpflags
|= (TMPF_SEND_DSN
|
1593 tp
->t_mpflags
|= TMPF_MPTCP_ACKNOW
;
1595 optlen
= mptcp_setup_opts(tp
, off
, &opt
[0], optlen
, flags
,
1596 len
, &mptcp_acknow
);
1597 tp
->t_mpflags
&= ~TMPF_SEND_DSN
;
	if (tfo_enabled(tp) && !(tp->t_flags & TF_NOOPT) &&
	    (flags & (TH_SYN | TH_ACK)) == TH_SYN)
		optlen += tcp_tfo_write_cookie(tp, optlen, len, opt);

	if (tfo_enabled(tp) &&
	    (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
	    (tp->t_tfo_flags & TFO_F_OFFER_COOKIE))
		optlen += tcp_tfo_write_cookie_rep(tp, optlen, opt);
	if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
		/*
		 * Send SACKs if necessary.  This should be the last
		 * option processed.  Only as many SACKs are sent as
		 * are permitted by the maximum options size.
		 *
		 * In general, SACK blocks consume 8*n+2 bytes.
		 * So a full size SACK blocks option is 34 bytes
		 * (to generate 4 SACK blocks).  At a minimum,
		 * we need 10 bytes (to generate 1 SACK block).
		 * If TCP Timestamps (12 bytes) and TCP Signatures
		 * (18 bytes) are both present, we'll just have
		 * 10 bytes for SACK options 40 - (12 + 18).
		 */
		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
		    (tp->t_flags & TF_SACK_PERMIT) &&
		    (tp->rcv_numsacks > 0 || TCP_SEND_DSACK_OPT(tp)) &&
		    MAX_TCPOPTLEN - optlen - 2 >= TCPOLEN_SACK) {
			int nsack, padlen;
			u_char *bp = (u_char *)opt + optlen;
			u_int32_t *lp;

			nsack = (MAX_TCPOPTLEN - optlen - 2) / TCPOLEN_SACK;
			nsack = min(nsack, (tp->rcv_numsacks +
			    (TCP_SEND_DSACK_OPT(tp) ? 1 : 0)));
			sackoptlen = (2 + nsack * TCPOLEN_SACK);

			/*
			 * First we need to pad options so that the
			 * SACK blocks can start at a 4-byte boundary
			 * (sack option and length are at a 2 byte offset).
			 */
			padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4;
			optlen += padlen;
			while (padlen-- > 0)
				*bp++ = TCPOPT_NOP;

			tcpstat.tcps_sack_send_blocks++;
			*bp++ = TCPOPT_SACK;
			*bp++ = sackoptlen;
			lp = (u_int32_t *)(void *)bp;

			/*
			 * First block of SACK option should represent
			 * DSACK. Prefer to send SACK information if there
			 * is space for only one SACK block. This will
			 * allow for faster recovery.
			 */
			if (TCP_SEND_DSACK_OPT(tp) && nsack > 0 &&
			    (tp->rcv_numsacks == 0 || nsack > 1)) {
				*lp++ = htonl(tp->t_dsack_lseq);
				*lp++ = htonl(tp->t_dsack_rseq);
				tcpstat.tcps_dsack_sent++;
				nsack--;
			}
			VERIFY(nsack == 0 || tp->rcv_numsacks >= nsack);
			for (i = 0; i < nsack; i++) {
				struct sackblk sack = tp->sackblks[i];
				*lp++ = htonl(sack.start);
				*lp++ = htonl(sack.end);
			}
			optlen += sackoptlen;
		}
	}
	/* Pad TCP options to a 4 byte boundary */
	if (optlen < MAX_TCPOPTLEN && (optlen % sizeof(u_int32_t))) {
		int pad = sizeof(u_int32_t) - (optlen % sizeof(u_int32_t));
		u_char *bp = (u_char *)opt + optlen;

		optlen += pad;
		while (pad) {
			*bp++ = TCPOPT_EOL;
			pad--;
		}
	}
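
	/*
	 * Illustrative layout of a finished option block containing SACK
	 * (hypothetical 2-block case):
	 *
	 *	NOP NOP TS (10 bytes)	12 bytes  timestamps
	 *	NOP NOP			 2 bytes  alignment pad
	 *	kind=5 len=18		 2 bytes  SACK header
	 *	left/right edge pairs	16 bytes  2 blocks x 8
	 *				--------
	 *				32 bytes
	 *
	 * Any residual odd length is rounded up to a 4-byte boundary with
	 * TCPOPT_EOL bytes by the loop above.
	 */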
	/*
	 * RFC 3168 states that:
	 * - If you ever sent an ECN-setup SYN/SYN-ACK you must be prepared
	 * to handle the TCP ECE flag, even if you also later send a
	 * non-ECN-setup SYN/SYN-ACK.
	 * - If you ever send a non-ECN-setup SYN/SYN-ACK, you must not set
	 * the ip ECT flag.
	 *
	 * It is not clear how the ECE flag would ever be set if you never
	 * set the IP ECT flag on outbound packets. All the same, we use
	 * the TE_SETUPSENT to indicate that we have committed to handling
	 * the TCP ECE flag correctly. We use the TE_SENDIPECT to indicate
	 * whether or not we should set the IP ECT flag on outbound packet
	 *
	 * For a SYN-ACK, send an ECN setup SYN-ACK
	 */
	if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
	    (tp->ecn_flags & TE_ENABLE_ECN)) {
		if (tp->ecn_flags & TE_SETUPRECEIVED) {
			if (tcp_send_ecn_flags_on_syn(tp, so)) {
				/*
				 * Setting TH_ECE makes this an ECN-setup
				 * SYN-ACK
				 */
				flags |= TH_ECE;

				/*
				 * Record that we sent the ECN-setup and
				 * default to setting IP ECT.
				 */
				tp->ecn_flags |= (TE_SETUPSENT|TE_SENDIPECT);
				tcpstat.tcps_ecn_server_setup++;
				tcpstat.tcps_ecn_server_success++;
			} else {
				/*
				 * We sent an ECN-setup SYN-ACK but it was
				 * dropped. Fallback to non-ECN-setup
				 * SYN-ACK and clear flag to indicate that
				 * we should not send data with IP ECT set
				 *
				 * Pretend we didn't receive an
				 * ECN-setup SYN.
				 *
				 * We already incremented the counter
				 * assuming that the ECN setup will
				 * succeed. Decrementing here
				 * tcps_ecn_server_success to correct it.
				 */
				if (tp->ecn_flags & TE_SETUPSENT) {
					tcpstat.tcps_ecn_lost_synack++;
					tcpstat.tcps_ecn_server_success--;
					tp->ecn_flags |= TE_LOST_SYNACK;
				}

				tp->ecn_flags &=
				    ~(TE_SETUPRECEIVED | TE_SENDIPECT |
				    TE_SENDCWR);
			}
		}
	} else if ((flags & (TH_SYN | TH_ACK)) == TH_SYN &&
	    (tp->ecn_flags & TE_ENABLE_ECN)) {
		if (tcp_send_ecn_flags_on_syn(tp, so)) {
			/*
			 * Setting TH_ECE and TH_CWR makes this an
			 * ECN-setup SYN
			 */
			flags |= (TH_ECE | TH_CWR);
			tcpstat.tcps_ecn_client_setup++;
			tp->ecn_flags |= TE_CLIENT_SETUP;

			/*
			 * Record that we sent the ECN-setup and default to
			 * setting IP ECT.
			 */
			tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
		} else {
			/*
			 * We sent an ECN-setup SYN but it was dropped.
			 * Fall back to non-ECN and clear flag indicating
			 * we should send data with IP ECT set.
			 */
			if (tp->ecn_flags & TE_SETUPSENT) {
				tcpstat.tcps_ecn_lost_syn++;
				tp->ecn_flags |= TE_LOST_SYN;
			}
			tp->ecn_flags &= ~TE_SENDIPECT;
		}
	}
	/*
	 * Check if we should set the TCP CWR flag.
	 * CWR flag is sent when we reduced the congestion window because
	 * we received a TCP ECE or we performed a fast retransmit. We
	 * never set the CWR flag on retransmitted packets. We only set
	 * the CWR flag on data packets. Pure acks don't have this set.
	 */
	if ((tp->ecn_flags & TE_SENDCWR) != 0 && len != 0 &&
	    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
		flags |= TH_CWR;
		tp->ecn_flags &= ~TE_SENDCWR;
	}

	/*
	 * Check if we should set the TCP ECE flag.
	 */
	if ((tp->ecn_flags & TE_SENDECE) != 0 && len == 0) {
		flags |= TH_ECE;
		tcpstat.tcps_ecn_sent_ece++;
	}

	hdrlen += optlen;

	/* Reset DSACK sequence numbers */
	tp->t_dsack_lseq = 0;
	tp->t_dsack_rseq = 0;
#if INET6
	if (isipv6)
		ipoptlen = ip6_optlen(inp);
	else
#endif
	{
		if (tp_inp_options) {
			ipoptlen = tp_inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
			ipoptlen = 0;
		}
	}
#if IPSEC
	ipoptlen += ipsec_optlen;
#endif
	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxopd length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 *
	 * When doing TSO limit a burst to TCP_MAXWIN minus the
	 * IP, TCP and Options length to keep ip->ip_len from
	 * overflowing.  Prevent the last segment from being
	 * fractional thus making them all equal sized and set
	 * the flag to continue sending.  TSO is disabled when
	 * IP options or IPSEC are present.
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		/*
		 * If there is still more to send,
		 * don't close the connection.
		 */
		flags &= ~TH_FIN;
		if (tso) {
			int32_t tso_maxlen;

			tso_maxlen = tp->tso_max_segment_size ?
			    tp->tso_max_segment_size : TCP_MAXWIN;

			if (len > tso_maxlen - hdrlen - optlen) {
				len = tso_maxlen - hdrlen - optlen;
				len = len - (len % (tp->t_maxopd - optlen));
				sendalot = 1;
			} else if (tp->t_flags & TF_NEEDFIN) {
				sendalot = 1;
			}
		} else {
			len = tp->t_maxopd - optlen - ipoptlen;
			sendalot = 1;
		}
	}
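
	/*
	 * Worked example of the TSO burst sizing above (hypothetical
	 * numbers): with tso_maxlen = TCP_MAXWIN (65535), hdrlen = 52 and
	 * optlen = 12, a burst is capped at 65471 bytes and then rounded
	 * down to a multiple of the effective MSS (t_maxopd - optlen), so
	 * every segment the hardware cuts is full-sized; sendalot = 1
	 * brings us back around for the fractional remainder.
	 */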
	if (max_linkhdr + hdrlen > MCLBYTES)
		panic("tcphdr too big");

	/* Check if there is enough data in the send socket
	 * buffer to start measuring bandwidth
	 */
	if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
	    (tp->t_bwmeas != NULL) &&
	    (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) {
		tp->t_bwmeas->bw_size = min(min(
		    (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)),
		    tp->snd_cwnd), tp->snd_wnd);
		if (tp->t_bwmeas->bw_minsize > 0 &&
		    tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize)
			tp->t_bwmeas->bw_size = 0;
		if (tp->t_bwmeas->bw_maxsize > 0)
			tp->t_bwmeas->bw_size = min(tp->t_bwmeas->bw_size,
			    tp->t_bwmeas->bw_maxsize);
		if (tp->t_bwmeas->bw_size > 0) {
			tp->t_flagsext |= TF_BWMEAS_INPROGRESS;
			tp->t_bwmeas->bw_start = tp->snd_max;
			tp->t_bwmeas->bw_ts = tcp_now;
		}
	}
	VERIFY(inp->inp_flowhash != 0);
	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		tp->t_pmtud_lastseg_size = len + optlen + ipoptlen;
		if ((tp->t_flagsext & TF_FORCE) && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
			if (nstat_collect) {
				nstat_route_tx(inp->inp_route.ro_rt, 1,
				    len, NSTAT_TX_FLAG_RETRANSMIT);
				INP_ADD_STAT(inp, cell, wifi, wired,
				    txpackets, 1);
				INP_ADD_STAT(inp, cell, wifi, wired,
				    txbytes, len);
				tp->t_stat.txretransmitbytes += len;
				tp->t_stat.rxmitpkts++;
			}
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;

			if (nstat_collect) {
				INP_ADD_STAT(inp, cell, wifi, wired,
				    txpackets, 1);
				INP_ADD_STAT(inp, cell, wifi, wired,
				    txbytes, len);
			}
			inp_decr_sndbytes_unsent(so, len);
		}
		inp_set_activity_bitmap(inp);
#if MPTCP
		if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
			tcpstat.tcps_mp_sndpacks++;
			tcpstat.tcps_mp_sndbytes += len;
		}
#endif /* MPTCP */
		/*
		 * Try to use the new interface that allocates all
		 * the necessary mbuf hdrs under 1 mbuf lock and
		 * avoids rescanning the socket mbuf list if
		 * certain conditions are met.  This routine can't
		 * be used in the following cases...
		 * 1) the protocol headers exceed the capacity
		 * of a single mbuf header's data area (no cluster attached)
		 * 2) the length of the data being transmitted plus
		 * the protocol headers fits into a single mbuf header's
		 * data area (no cluster attached)
		 */
		m = NULL;

		/* minimum length we are going to allocate */
		allocated_len = MHLEN;
		if (MHLEN < hdrlen + max_linkhdr) {
			MGETHDR(m, M_DONTWAIT, MT_HEADER);
			if (m == NULL) {
				error = ENOBUFS;
				goto out;
			}
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				error = ENOBUFS;
				goto out;
			}
			m->m_data += max_linkhdr;
			m->m_len = hdrlen;
			allocated_len = MCLBYTES;
		}
		if (len <= allocated_len - hdrlen - max_linkhdr) {
			if (m == NULL) {
				VERIFY(allocated_len <= MHLEN);
				MGETHDR(m, M_DONTWAIT, MT_HEADER);
				if (m == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
			/* makes sure we still have data left to be sent at this point */
			if (so->so_snd.sb_mb == NULL || off < 0) {
				if (m != NULL)
					m_freem(m);
				error = 0; /* should we return an error? */
				goto out;
			}
			m_copydata(so->so_snd.sb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			uint32_t copymode;
			/*
			 * Retain packet header metadata at the socket
			 * buffer if this is an MPTCP subflow,
			 * otherwise move it.
			 */
			copymode = M_COPYM_MOVE_HDR;
#if MPTCP
			if (so->so_flags & SOF_MP_SUBFLOW) {
				copymode = M_COPYM_NOOP_HDR;
			}
#endif /* MPTCP */
			if (m != NULL) {
				m->m_next = m_copym_mode(so->so_snd.sb_mb,
				    off, (int)len, M_DONTWAIT, copymode);
				if (m->m_next == NULL) {
					(void) m_free(m);
					error = ENOBUFS;
					goto out;
				}
			} else {
				/*
				 * make sure we still have data left
				 * to be sent at this point
				 */
				if (so->so_snd.sb_mb == NULL) {
					error = 0; /* should we return an error? */
					goto out;
				}

				/*
				 * m_copym_with_hdrs will always return the
				 * last mbuf pointer and the offset into it that
				 * it acted on to fulfill the current request,
				 * whether a valid 'hint' was passed in or not.
				 */
				if ((m = m_copym_with_hdrs(so->so_snd.sb_mb,
				    off, len, M_DONTWAIT, NULL, NULL,
				    copymode)) == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
		}
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 *
		 * On SYN-segments we should not add the PUSH-flag.
		 */
		if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN))
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		if (MHLEN < (hdrlen + max_linkhdr)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				error = ENOBUFS;
				goto out;
			}
		}
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	m->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
	mac_mbuf_label_associate_inpcb(inp, m);
#endif
#if INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(void *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th);
		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
		    !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
			ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
		}
		svc_flags |= PKT_SCF_IPV6;
#if PF_ECN
		m_pftag(m)->pftag_hdr = (void *)ip6;
		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET6;
#endif /* PF_ECN */
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(void *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th);
		if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
		    !SEQ_LT(tp->snd_nxt, tp->snd_max) &&
		    !sack_rxmit && !(flags & TH_SYN)) {
			ip->ip_tos |= IPTOS_ECN_ECT0;
		}
#if PF_ECN
		m_pftag(m)->pftag_hdr = (void *)ip;
		m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET;
#endif /* PF_ECN */
	}
	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
	    tp->snd_nxt == tp->snd_max)

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 *
	 * Note the state of this retransmit segment to detect spurious
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tp->t_timer[TCPT_PERSIST]) {
			th->th_seq = htonl(tp->snd_nxt);

			m->m_pkthdr.tx_start_seq = tp->snd_nxt;
			m->m_pkthdr.pkt_flags |= PKTF_START_SEQ;

			if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
				if (SACK_ENABLED(tp) && len > 1) {
					tcp_rxtseg_insert(tp, tp->snd_nxt,
					    (tp->snd_nxt + len - 1));

				m->m_pkthdr.pkt_flags |=

			th->th_seq = htonl(tp->snd_max);

		th->th_seq = htonl(p->rxmit);

		m->m_pkthdr.pkt_flags |=
		    (PKTF_TCP_REXMT | PKTF_START_SEQ);
		m->m_pkthdr.tx_start_seq = p->rxmit;

		tcp_rxtseg_insert(tp, p->rxmit, (p->rxmit + len - 1));

		tp->sackhint.sack_bytes_rexmit += len;
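	/*
	 * Summary of the sequence-number choice above (annotation, not
	 * part of the original source):
	 *
	 *   data, SYN/FIN, or zero-window probe pending -> th_seq = snd_nxt
	 *   pure ACK while retransmissions are pending  -> th_seq = snd_max
	 *   SACK-driven retransmit (sack_rxmit != 0)    -> th_seq = p->rxmit
	 */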
	th->th_ack = htonl(tp->rcv_nxt);
	tp->last_ack_sent = tp->rcv_nxt;

	bcopy(opt, th + 1, optlen);
	th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons((u_short) (recwin >> tp->rcv_scale));
	if (!(so->so_flags & SOF_MP_SUBFLOW)) {
		if (recwin > 0 && SEQ_LT(tp->rcv_adv, tp->rcv_nxt + recwin)) {
			tp->rcv_adv = tp->rcv_nxt + recwin;

		struct mptcb *mp_tp = tptomptp(tp);

		tp->rcv_adv = tp->rcv_nxt + recwin;

		if (recwin > 0 && SEQ_LT(mp_tp->mpt_rcvadv,
		    (uint32_t)mp_tp->mpt_rcvnxt + recwin)) {
			mp_tp->mpt_rcvadv =
			    (uint32_t)mp_tp->mpt_rcvnxt + recwin;
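	/*
	 * Illustrative sketch (not part of the original source): how the
	 * advertised window is packed into the 16-bit th_win field using
	 * the negotiated window-scale shift, mirroring the computation
	 * above.  The helper and its example values are hypothetical.
	 */
#if 0	/* example only, not compiled */
static u_short
example_window_field(int32_t recwin, int rcv_scale)
{
	/* e.g. recwin = 262144, rcv_scale = 3: 262144 >> 3 = 32768 */
	if (recwin > (int32_t)(TCP_MAXWIN << rcv_scale))
		recwin = (int32_t)(TCP_MAXWIN << rcv_scale);
	return (htons((u_short)(recwin >> rcv_scale)));
}
#endif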
	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 */
	if (th->th_win == 0)
		tp->t_flags |= TF_RXWIN0SENT;

	tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;

	/*
	 * If no urgent pointer to send, then we pull
	 * the urgent pointer to the left edge of the send window
	 * so that it doesn't drift into the send window on sequence
	 * number wraparound.
	 */
	tp->snd_up = tp->snd_una;	/* drag it along */
	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */

	/*
	 * If this is potentially the last packet on the stream, then mark
	 * it in order to enable some optimizations in the underlying
	 */
	if (tp->t_state != TCPS_ESTABLISHED &&
	    (tp->t_state == TCPS_CLOSING || tp->t_state == TCPS_TIME_WAIT
	    || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST)))
		m->m_pkthdr.pkt_flags |= PKTF_LAST_PKT;
	/*
	 * ip6_plen need not be filled now, and will be filled
	 */
	m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

	th->th_sum = in_addword(th->th_sum,
	    htons((u_short)(optlen + len)));

	m->m_pkthdr.csum_flags = CSUM_TCP;
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);

	th->th_sum = in_addword(th->th_sum,
	    htons((u_short)(optlen + len)));
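	/*
	 * Illustrative sketch (not part of the original source): with
	 * checksum offload, th_sum carries only the pseudo-header sum,
	 * and the TCP length (options + payload) is folded in with a
	 * one's-complement 16-bit add, which is what in_addword() does.
	 * A minimal equivalent:
	 */
#if 0	/* example only, not compiled */
static u_short
example_in_addword(u_short a, u_short b)
{
	u_int32_t sum = (u_int32_t)a + (u_int32_t)b;

	/* Fold the carry back in (one's-complement arithmetic). */
	if (sum > 0xffff)
		sum -= 0xffff;
	return ((u_short)sum);
}
#endif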
	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	m->m_pkthdr.csum_flags |= CSUM_TSO_IPV6;

	m->m_pkthdr.csum_flags |= CSUM_TSO_IPV4;

	m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;

	m->m_pkthdr.tso_segsz = 0;
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (!(tp->t_flagsext & TF_FORCE)
	    || tp->t_timer[TCPT_PERSIST] == 0) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {

			if ((flags & TH_FIN) &&
			    !(tp->t_flags & TF_SENTFIN)) {

				tp->t_flags |= TF_SENTFIN;

		if (sack_rescue_rxt == TRUE) {
			tp->snd_nxt = old_snd_nxt;
			sack_rescue_rxt = FALSE;
			tcpstat.tcps_pto_in_recovery++;

		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			tp->t_sndtime = tcp_now;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = tcp_now;
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;

				/* update variables related to pipe ack */
				tp->t_pipeack_lastuna = tp->snd_una;
		/*
		 * Set retransmit timer if not currently set,
		 * and not doing an ack or a keep-alive probe.
		 */
		if (tp->t_timer[TCPT_REXMT] == 0 &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		    tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) {
			if (tp->t_timer[TCPT_PERSIST]) {
				tp->t_timer[TCPT_PERSIST] = 0;
				tp->t_persist_stop = 0;
				TCP_RESET_REXMT_STATE(tp);

			tp->t_timer[TCPT_REXMT] =
			    OFFSET_FROM_START(tp, tp->t_rxtcur);
		/*
		 * Set tail loss probe timeout if new data is being
		 * transmitted. This will be supported only when
		 * SACK option is enabled on a connection.
		 *
		 * Every time new data is sent PTO will get reset.
		 */
		if (tcp_enable_tlp && len != 0 &&
		    tp->t_state == TCPS_ESTABLISHED &&
		    SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
		    tp->snd_nxt == tp->snd_max &&
		    SEQ_GT(tp->snd_nxt, tp->snd_una) &&
		    tp->t_rxtshift == 0 &&
		    (tp->t_flagsext & (TF_SENT_TLPROBE|TF_PKTS_REORDERED)) == 0) {
			u_int32_t pto, srtt;

			/*
			 * Using SRTT alone to set PTO can cause spurious
			 * retransmissions on wireless networks where there
			 * is a lot of variance in RTT. Taking variance
			 * into account will avoid this.
			 */
			srtt = tp->t_srtt >> TCP_RTT_SHIFT;
			pto = ((TCP_REXMTVAL(tp)) * 3) >> 1;
			pto = max(2 * srtt, pto);
			if ((tp->snd_max - tp->snd_una) == tp->t_maxseg)
				    (((3 * pto) >> 2) + tcp_delack * 2));

			/* if RTO is less than PTO, choose RTO instead */
			if (tp->t_rxtcur < pto)

			tp->t_timer[TCPT_PTO] = OFFSET_FROM_START(tp, pto);
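			/*
			 * Worked example of the PTO formula above, with
			 * made-up numbers: srtt = 40 ms and
			 * TCP_REXMTVAL(tp) = 60 ms give
			 *   pto = (60 * 3) >> 1 = 90 ms,
			 *   pto = max(2 * 40, 90) = 90 ms.
			 * With exactly one segment outstanding, a
			 * delayed-ACK allowance is added so the probe
			 * does not fire before the peer's delayed ACK,
			 * and the RTO is used instead whenever it is
			 * smaller than the computed PTO.
			 */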
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		if ((flags & TH_FIN) &&
		    !(tp->t_flags & TF_SENTFIN)) {

			tp->t_flags |= TF_SENTFIN;

		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt + len;
			tp->t_sndtime = tcp_now;
	if (so_options & SO_DEBUG)
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 *
	 * m->m_pkthdr.len should have been set before checksum
	 * calculation, because in6_cksum() needs it.
	 *
	 * we separately set hoplimit for every segment, since the
	 * user might want to change the value via setsockopt.
	 * Also, desired default hop limit might be changed via
	 * Neighbor Discovery.
	 */
	ip6->ip6_hlim = in6_selecthlim(inp, inp->in6p_route.ro_rt ?
	    inp->in6p_route.ro_rt->rt_ifp : NULL);

	/* TODO: IPv6 IP6TOS_ECT bit on */
	KERNEL_DEBUG(DBG_LAYER_BEG,
	    ((inp->inp_fport << 16) | inp->inp_lport),
	    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
	    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
	ip->ip_len = m->m_pkthdr.len;
	ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */
	ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);	/* XXX */
	KERNEL_DEBUG(DBG_LAYER_BEG,
	    ((inp->inp_fport << 16) | inp->inp_lport),
	    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
	    (inp->inp_faddr.s_addr & 0xffff)), 0,0,0);
	/*
	 * See if we should do MTU discovery.
	 * The flag is updated based on the following criteria:
	 * 1) Path MTU discovery is authorized by the sysctl
	 * 2) The route isn't set yet (unlikely but could happen)
	 * 3) The route is up
	 * 4) the MTU is not locked (if it is, then discovery has been
	 *    disabled for that route)
	 */
	if (path_mtu_discovery && (tp->t_flags & TF_PMTUD))
		ip->ip_off |= IP_DF;
	necp_kernel_policy_id policy_id;
	necp_kernel_policy_id skip_policy_id;
	u_int32_t route_rule_id;
	if (!necp_socket_is_allowed_to_send_recv(inp, NULL, &policy_id,
	    &route_rule_id, &skip_policy_id)) {
		TCP_LOG_DROP_NECP(isipv6 ? (void *)ip6 : (void *)ip,
		    th, tp, true);

		error = EHOSTUNREACH;

	necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id,
	    skip_policy_id);

	if (net_qos_policy_restricted != 0) {
		necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
		    NULL, route_rule_id);
	if (inp->inp_sp != NULL)
		ipsec_setsocket(m, so);
	/*
	 * The socket is kept locked while sending out packets in
	 * ip_output, even if packet chaining is not active.
	 */

	/*
	 * Embed the flow hash in pkt hdr and mark the packet as
	 * capable of flow controlling
	 */
	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
	    PKTF_FLOW_ADV);
	m->m_pkthdr.pkt_proto = IPPROTO_TCP;
	m->m_pkthdr.tx_tcp_pid = so->last_pid;
	if (so->so_flags & SOF_DELEGATED)
		m->m_pkthdr.tx_tcp_e_pid = so->e_pid;

	m->m_pkthdr.tx_tcp_e_pid = 0;

	m->m_nextpkt = NULL;
	if (inp->inp_last_outifp != NULL &&
	    !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
		/* Hint to prioritize this packet if
		 * 1. the packet has no data
		 * 2. the interface supports transmit-start model and did
		 *    not disable ACK prioritization.
		 * 3. Only ACK flag is set.
		 * 4. there is no outstanding data on this connection.
		 */
		if (tcp_prioritize_acks != 0 && len == 0 &&
		    (inp->inp_last_outifp->if_eflags &
		    (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) {
			if (th->th_flags == TH_ACK &&
			    tp->snd_una == tp->snd_max &&
			    tp->t_timer[TCPT_REXMT] == 0)
				svc_flags |= PKT_SCF_TCP_ACK;
			if (th->th_flags & TH_SYN)
				svc_flags |= PKT_SCF_TCP_SYN;

		set_packet_service_class(m, so, sotc, svc_flags);
	/*
	 * Optimization for loopback just set the mbuf
	 */
	(void) m_set_service_class(m, so_tc2msc(sotc));

	TCP_LOG_TH_FLAGS(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true,
	    inp->inp_last_outifp != NULL ? inp->inp_last_outifp :

	tp->t_pktlist_sentlen += len;
	DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
	    struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *,

	DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
	    struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th);
	if (tp->t_pktlist_head != NULL) {
		tp->t_pktlist_tail->m_nextpkt = m;
		tp->t_pktlist_tail = m;

		packchain_newlist++;
		tp->t_pktlist_head = tp->t_pktlist_tail = m;
	if (lro_ackmore && !sackoptlen && tp->t_timer[TCPT_PERSIST] == 0 &&
	    (th->th_flags & TH_ACK) == TH_ACK && len == 0 &&
	    tp->t_state == TCPS_ESTABLISHED) {
		/* For a pure ACK, see if you need to send more of them */
		mnext = tcp_send_lroacks(tp, m, th);

		tp->t_pktlist_tail->m_nextpkt = mnext;
		if (mnext->m_nextpkt == NULL) {
			tp->t_pktlist_tail = mnext;

			struct mbuf *tail, *next;
			next = mnext->m_nextpkt;
			tail = next->m_nextpkt;

			tail = tail->m_nextpkt;

			tp->t_pktlist_tail = next;
	if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) ||
	    (tp->snd_cwnd <= (tp->snd_wnd / 8)) ||
	    (tp->t_flags & TF_ACKNOW) ||
	    (tp->t_flagsext & TF_FORCE) ||
	    tp->t_lastchain >= tcp_packet_chaining) {

		while (inp->inp_sndinprog_cnt == 0 &&
		    tp->t_pktlist_head != NULL) {
			packetlist = tp->t_pktlist_head;
			packchain_listadd = tp->t_lastchain;

			lost = tp->t_pktlist_sentlen;
			TCP_PKTLIST_CLEAR(tp);

			error = tcp_ip_output(so, tp, packetlist,
			    packchain_listadd, tp_inp_options,
			    (so_options & SO_DONTROUTE),
			    (sack_rxmit || (sack_bytes_rxmt != 0)), isipv6);

			/*
			 * Fold the rest of the unsent packets in this
			 * tcp's packet list into "lost", since we're
			 * about to free the whole list below.
			 */
			lost += tp->t_pktlist_sentlen;
		/* tcp was closed while we were in ip; resume close */
		if (inp->inp_sndinprog_cnt == 0 &&
		    (tp->t_flags & TF_CLOSING)) {
			tp->t_flags &= ~TF_CLOSING;
			(void) tcp_close(tp);

	tcpstat.tcps_sndtotal++;
	/*
	 * Assume that the packets were lost, so back out the
	 * sequence number advance, if any.  Note that the "lost"
	 * variable represents the amount of user data sent during
	 * the recent call to ip_output_list() plus the amount of
	 * user data in the packet list for this tcp at the moment.
	 */
	if (!(tp->t_flagsext & TF_FORCE)
	    || tp->t_timer[TCPT_PERSIST] == 0) {
		/*
		 * No need to check for TH_FIN here because
		 * the TF_SENTFIN flag handles that case.
		 */
		if ((flags & TH_SYN) == 0) {

			if (SEQ_GT((p->rxmit - lost),

			lost = p->rxmit - tp->snd_una;
			p->rxmit = tp->snd_una;

			tp->sackhint.sack_bytes_rexmit -= lost;

			if (SEQ_GT((tp->snd_nxt - lost),

			tp->snd_nxt -= lost;

			tp->snd_nxt = tp->snd_una;
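	/*
	 * Worked example of the backout above, with made-up numbers:
	 * if snd_una = 1000 and snd_nxt had advanced to 4000 when the
	 * send failed with lost = 2000, snd_nxt is pulled back to 2000;
	 * had "lost" exceeded the distance to snd_una, the fallback
	 * assignment clamps snd_nxt (or p->rxmit) to snd_una, so the
	 * send window never moves left of the acknowledged data.
	 */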
	if (tp->t_pktlist_head != NULL)
		m_freem_list(tp->t_pktlist_head);
	TCP_PKTLIST_CLEAR(tp);
	if (error == ENOBUFS) {
		/*
		 * Set retransmit timer if not currently set
		 * when we failed to send a segment that can be
		 * retransmitted (i.e. not pure ack or rst)
		 */
		if (tp->t_timer[TCPT_REXMT] == 0 &&
		    tp->t_timer[TCPT_PERSIST] == 0 &&
		    (len != 0 || (flags & (TH_SYN | TH_FIN)) != 0 ||
		    so->so_snd.sb_cc > 0))
			tp->t_timer[TCPT_REXMT] =
			    OFFSET_FROM_START(tp, tp->t_rxtcur);
		tp->snd_cwnd = tp->t_maxseg;
		tp->t_bytes_acked = 0;
		tcp_check_timer_state(tp);
		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);

		tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR);
	if (error == EMSGSIZE) {
		/*
		 * ip_output() will have already fixed the route
		 * for us.  tcp_mtudisc() will, as its last action,
		 * initiate retransmission, so it is important to
		 *
		 * If TSO was active we either got an interface
		 * without TSO capabilities or TSO was turned off.
		 * Disable it for this connection too and
		 * immediately retry with MSS sized segments generated
		 */
		tp->t_flags &= ~TF_TSO;

		tcp_mtudisc(inp, 0);
		tcp_check_timer_state(tp);

		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	/*
	 * Unless this is due to interface restriction policy,
	 * treat EHOSTUNREACH/ENETDOWN as a soft error.
	 */
	if ((error == EHOSTUNREACH || error == ENETDOWN) &&
	    TCPS_HAVERCVDSYN(tp->t_state) &&
	    !inp_restricted_send(inp, inp->inp_last_outifp)) {
		tp->t_softerror = error;

	tcp_check_timer_state(tp);
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	tcpstat.tcps_sndtotal++;

	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);

	tcp_check_timer_state(tp);
tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt,
    int cnt, struct mbuf *opt, int flags, int sack_in_progress,
    boolean_t isipv6)

	boolean_t unlocked = FALSE;
	boolean_t ifdenied = FALSE;
	struct inpcb *inp = tp->t_inpcb;
	struct ip_out_args ipoa;

	struct ifnet *outif = NULL;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
	ipoa.ipoa_sotc = SO_TC_UNSPEC;
	ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	struct ip6_out_args ip6oa;
	struct route_in6 ro6;

	bzero(&ip6oa, sizeof(ip6oa));
	ip6oa.ip6oa_boundif = IFSCOPE_NONE;
	ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
	ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
	ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	struct flowadv *adv =
	    (isipv6 ? &ip6oa.ip6oa_flowadv : &ipoa.ipoa_flowadv);

	struct flowadv *adv = &ipoa.ipoa_flowadv;
	/* If socket was bound to an ifindex, tell ip_output about it */
	if (inp->inp_flags & INP_BOUND_IF) {

		ip6oa.ip6oa_boundif = inp->inp_boundifp->if_index;
		ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;

		ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
	if (INP_NO_CELLULAR(inp)) {

		ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;

		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;

	if (INP_NO_EXPENSIVE(inp)) {

		ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;

		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;

	if (INP_NO_CONSTRAINED(inp)) {

		ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;

		ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;

	if (INP_AWDL_UNRESTRICTED(inp)) {

		ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;

		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;

	if (INP_INTCOPROC_ALLOWED(inp) && isipv6) {
		ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
	ip6oa.ip6oa_sotc = so->so_traffic_class;
	ip6oa.ip6oa_netsvctype = so->so_netsvctype;

	ipoa.ipoa_sotc = so->so_traffic_class;
	ipoa.ipoa_netsvctype = so->so_netsvctype;

	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {

		ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;

		ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;

	flags |= IPV6_OUTARGS;

	flags |= IP_OUTARGS;
	/* Copy the cached route and take an extra reference */

	in6p_route_copyout(inp, &ro6);

	inp_route_copyout(inp, &ro);
	/*
	 * Make sure ACK/DELACK conditions are cleared before
	 * we unlock the socket.
	 */
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	tp->t_timer[TCPT_DELACK] = 0;
	tp->t_unacksegs = 0;

	/* Increment the count of outstanding send operations */
	inp->inp_sndinprog_cnt++;
	/*
	 * If allowed, unlock TCP socket while in IP
	 * but only if the connection is established and
	 * in a normal mode where reentrancy on the tcpcb won't be
	 * - there is no SACK episode
	 * - we're not in Fast Recovery mode
	 * - we're not sending from an upcall.
	 */
	if (tcp_output_unlocked && !so->so_upcallusecount &&
	    (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0) &&
	    !IN_FASTRECOVERY(tp) && !(so->so_flags & SOF_MP_SUBFLOW)) {

		socket_unlock(so, 0);
	/*
	 * Don't send down a chain of packets when:
	 * - TCP chaining is disabled
	 * - there is an IPsec rule set
	 * - there is a non default rule set for the firewall
	 */
	chain = tcp_packet_chaining > 1

	    && (fw_enable == 0 || fw_bypass)

	    ; // I'm important, not extraneous
	while (pkt != NULL) {
		struct mbuf *npkt = pkt->m_nextpkt;

		pkt->m_nextpkt = NULL;

		/*
		 * If we are not chaining, make sure to set the packet
		 * list count to 0 so that IP takes the right path;
		 * this is important for cases such as IPsec where a
		 * single mbuf might result in multiple mbufs as part
		 * of the encapsulation.  If a non-zero count is passed
		 * down to IP, the head of the chain might change and
		 * we could end up skipping it (thus generating bogus
		 * packets).  Fixing it in IP would be desirable, but
		 * for now this would do it.
		 */

		error = ip6_output_list(pkt, cnt,
		    inp->in6p_outputopts, &ro6, flags, NULL, NULL,

		ifdenied = (ip6oa.ip6oa_retflags & IP6OARF_IFDENIED);

		error = ip_output_list(pkt, cnt, opt, &ro, flags, NULL,

		ifdenied = (ipoa.ipoa_retflags & IPOARF_IFDENIED);
		if (chain || error) {
			/*
			 * If we sent down a chain then we are done since
			 * the callee had taken care of everything; else
			 * we need to free the rest of the chain ourselves.
			 */
	/*
	 * Enter flow controlled state if the connection is established
	 * and is not in recovery. Flow control is allowed only if there
	 * is outstanding data.
	 *
	 * A connection will enter suspended state even if it is in
	 */
	if (((adv->code == FADV_FLOW_CONTROLLED && !IN_FASTRECOVERY(tp)) ||
	    adv->code == FADV_SUSPENDED) &&
	    !(tp->t_flags & TF_CLOSING) &&
	    tp->t_state == TCPS_ESTABLISHED &&
	    SEQ_GT(tp->snd_max, tp->snd_una)) {

		rc = inp_set_fc_state(inp, adv->code);

		tcp_ccdbg_trace(tp, NULL,
		    ((adv->code == FADV_FLOW_CONTROLLED) ?
		    TCP_CC_FLOW_CONTROL : TCP_CC_SUSPEND));

	/*
	 * When an interface queue gets suspended, some of the
	 * packets are dropped. Return ENOBUFS, to update the
	 */
	if (adv->code == FADV_SUSPENDED)
	VERIFY(inp->inp_sndinprog_cnt > 0);
	if (--inp->inp_sndinprog_cnt == 0) {
		inp->inp_flags &= ~(INP_FC_FEEDBACK);
		if (inp->inp_sndingprog_waiters > 0) {
			wakeup(&inp->inp_sndinprog_cnt);
	/*
	 * When an NECP IP tunnel policy forces the outbound interface,
	 * ip6_output_list() informs the transport layer of the actual
	 * outgoing interface
	 */
	if (ip6oa.ip6oa_flags & IP6OAF_BOUND_IF) {
		outif = ifindex2ifnet[ip6oa.ip6oa_boundif];
	} else if (ro6.ro_rt != NULL) {
		outif = ro6.ro_rt->rt_ifp;

	if (ro.ro_rt != NULL)
		outif = ro.ro_rt->rt_ifp;

	if (outif != NULL && outif != inp->inp_last_outifp) {
		/* Update the send byte count */
		if (so->so_snd.sb_cc > 0 &&
		    so->so_snd.sb_flags & SB_SNDBYTE_CNT) {
			inp_decr_sndbytes_total(so, so->so_snd.sb_cc);
			inp_decr_sndbytes_allunsent(so, tp->snd_una);
			so->so_snd.sb_flags &= ~SB_SNDBYTE_CNT;

		inp->inp_last_outifp = outif;
	if (error != 0 && ifdenied &&
	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) ||
	    INP_NO_CONSTRAINED(inp)))
		    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED));

	/* Synchronize cached PCB route & options */

	in6p_route_copyin(inp, &ro6);

	inp_route_copyin(inp, &ro);
	if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift == 0 &&
	    tp->t_inpcb->inp_route.ro_rt != NULL) {
		/* If we found the route and there is an rtt on it
		 * reset the retransmit timer
		 */
		tcp_getrt_rtt(tp, tp->t_inpcb->in6p_route.ro_rt);
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
int tcptv_persmin_val = TCPTV_PERSMIN;

tcp_setpersist(struct tcpcb *tp)

	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;

	/* If a PERSIST_TIMER option was set we will limit the
	 * time the persist timer will be active for that connection
	 * in order to avoid DOS by using zero window probes.
	 * see rdar://5805356
	 */
	if (tp->t_persist_timeout != 0 &&
	    tp->t_timer[TCPT_PERSIST] == 0 &&
	    tp->t_persist_stop == 0) {
		tp->t_persist_stop = tcp_now + tp->t_persist_timeout;

	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
	    t * tcp_backoff[tp->t_rxtshift],
	    tcptv_persmin_val, TCPTV_PERSMAX, 0);
	tp->t_timer[TCPT_PERSIST] =
	    OFFSET_FROM_START(tp, tp->t_timer[TCPT_PERSIST]);

	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
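/*
 * Illustrative sketch (not part of the original source) of the persist
 * backoff computed above: the base interval derived from srtt/rttvar is
 * scaled by an exponential backoff indexed by t_rxtshift, then clamped
 * into [tcptv_persmin_val, TCPTV_PERSMAX].  The helper below only mirrors
 * the shape of tcp_backoff[]; the cap at 1 << 6 is an assumption.
 */
#if 0	/* example only, not compiled */
static u_int32_t
example_persist_interval(u_int32_t base, int rxtshift,
    u_int32_t tmin, u_int32_t tmax)
{
	/* exponential backoff, capped like tcp_backoff[] (assumed) */
	u_int32_t interval = base * (1U << (rxtshift > 6 ? 6 : rxtshift));

	if (interval < tmin)
		interval = tmin;
	else if (interval > tmax)
		interval = tmax;
	return (interval);
}
#endif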
/*
 * Send as many acks as data coalesced. Every other packet when stretch
 * ACK is not enabled. Every 8 packets, if stretch ACK is enabled.
 */
tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th)

	struct mbuf *mnext = NULL, *ack_chain = NULL, *tail = NULL;

	tcp_seq org_ack = ntohl(th->th_ack);
	tcp_seq prev_ack = 0;
	int tack_offset = 28;	/* IPv6 and IP options not supported */
	int twin_offset = 34;	/* IPv6 and IP options not supported */
	int ack_size = (tp->t_flags & TF_STRETCHACK) ?
	    (maxseg_unacked * tp->t_maxseg) : (tp->t_maxseg << 1);
	int segs_acked = (tp->t_flags & TF_STRETCHACK) ? maxseg_unacked : 2;
	struct mbuf *prev_ack_pkt = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;
	unsigned short winsz = ntohs(th->th_win);
	unsigned int scaled_win = winsz << tp->rcv_scale;
	tcp_seq win_rtedge = org_ack + scaled_win;

	count = tp->t_lropktlen / tp->t_maxseg;

	prev_ack = (org_ack - tp->t_lropktlen) + ack_size;
	if (prev_ack < org_ack) {
		ack_chain = m_dup(m, M_DONTWAIT);

		th->th_ack = htonl(prev_ack);
		/* Keep adv window constant for duplicated ACK packets */
		scaled_win = win_rtedge - prev_ack;
		if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale))
			scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale);
		th->th_win = htons(scaled_win >> tp->rcv_scale);
		if (lrodebug == 5) {
			printf("%s: win = %d winsz = %d sc = %d"
			    __func__, scaled_win >> tp->rcv_scale, winsz,
			    tp->rcv_scale, tp->t_lropktlen, count);

		count -= segs_acked;	/* accounts for prev_ack packet */
		count = (count <= segs_acked) ? 0 : count - segs_acked;
		tcpstat.tcps_sndacks++;
		so_tc_update_stats(m, so, m_get_service_class(m));

		tp->t_lropktlen = 0;

	prev_ack_pkt = ack_chain;

	if ((prev_ack + ack_size) < org_ack) {
		prev_ack += ack_size;

	/*
	 * The last ACK sent must have the ACK number that TCP
	 * thinks is the last sent ACK number.
	 */

	mnext = m_dup(prev_ack_pkt, M_DONTWAIT);

	/* Keep adv window constant for duplicated ACK packets */
	scaled_win = win_rtedge - prev_ack;
	if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale))
		scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale);
	winsz = htons(scaled_win >> tp->rcv_scale);
	if (lrodebug == 5) {
		printf("%s: winsz = %d ack %x count %d\n",
		    __func__, scaled_win >> tp->rcv_scale,

	bcopy(&winsz, mtod(prev_ack_pkt, caddr_t) + twin_offset, 2);

	bcopy(&prev_ack, mtod(prev_ack_pkt, caddr_t) + tack_offset, 4);

	tail->m_nextpkt = mnext;

	count -= segs_acked;
	tcpstat.tcps_sndacks++;
	so_tc_update_stats(m, so, m_get_service_class(m));

	if (lrodebug == 5) {
		printf("%s: failed to alloc mbuf.\n", __func__);

	prev_ack_pkt = mnext;

	tp->t_lropktlen = 0;
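/*
 * Illustrative example (not part of the original source), with made-up
 * numbers: suppose 8 * t_maxseg bytes were coalesced (t_lropktlen), the
 * incoming ACK number is A, and stretch ACK is off, so ack_size is
 * 2 * t_maxseg.  The code above then emits duplicated ACKs for
 *   A - 6*maxseg, A - 4*maxseg, A - 2*maxseg
 * ahead of the original ACK for A, preserving the ACK clocking the
 * sender would have seen without LRO coalescing.
 */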
tcp_recv_throttle(struct tcpcb *tp)

	uint32_t base_rtt, newsize;
	struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv;

	if (tcp_use_rtt_recvbg == 1 &&
	    TSTMP_SUPPORTED(tp)) {
		/*
		 * Timestamps are supported on this connection. Use
		 * RTT to look for an increase in latency.
		 *
		 * If the connection is already being throttled, leave it
		 * in that state until rtt comes closer to base rtt
		 */
		if (tp->t_flagsext & TF_RECV_THROTTLE)

		base_rtt = get_base_rtt(tp);

		if (base_rtt != 0 && tp->t_rttcur != 0) {
			/*
			 * if latency increased on a background flow,
			 * return 1 to start throttling.
			 */
			if (tp->t_rttcur > (base_rtt + target_qdelay)) {
				tp->t_flagsext |= TF_RECV_THROTTLE;
				if (tp->t_recv_throttle_ts == 0)
					tp->t_recv_throttle_ts = tcp_now;
				/*
				 * Reduce the recv socket buffer size to
				 */
				if (sbrcv->sb_idealsize >
				    tcp_recv_throttle_minwin) {
					newsize = sbrcv->sb_idealsize >> 1;
					/* Set a minimum of 16 K */

					    tcp_recv_throttle_minwin);

					sbrcv->sb_idealsize = newsize;

	/*
	 * Timestamps are not supported or there is no good RTT
	 * measurement. Use IPDV in this case.
	 */
	if (tp->acc_iaj > tcp_acc_iaj_react_limit)