/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>
#include <sys/kdebug.h>

#include <netinet6/ipsec.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 3)
#define DBG_FNC_TCP_OUTPUT	NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
extern struct mbuf *m_copypack();

static int path_mtu_discovery = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
    &path_mtu_discovery, 1, "Enable Path MTU Discovery");
SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW,
    &ss_fltsz, 1, "Slow start flight size");

int ss_fltsz_local = TCP_MAXWIN;	/* something large */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW,
    &ss_fltsz_local, 1, "Slow start flight size for local networks");
int tcp_do_newreno = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW, &tcp_do_newreno,
    0, "Enable NewReno Algorithms");
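/*
 * These SYSCTL_INT entries are published under the net.inet.tcp node, so
 * the values can be inspected or changed at run time from user space,
 * e.g. "sysctl net.inet.tcp.path_mtu_discovery" or
 * "sysctl -w net.inet.tcp.newreno=1".
 */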
struct mbuf *m_copym_with_hdrs __P((struct mbuf *, int, int, int,
    struct mbuf **, int *));

/* temporary: for testing */
extern int ipsec_bypass;
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(tp)
    register struct tcpcb *tp;
{
    register struct socket *so = tp->t_inpcb->inp_socket;
    register long len, win;
    int off, flags, error;
    register struct mbuf *m;
    struct ip *ip = NULL;
    register struct ipovly *ipov = NULL;
    struct ip6_hdr *ip6 = NULL;
    register struct tcphdr *th;
    u_char opt[TCP_MAXOLEN];
    unsigned ipoptlen, optlen, hdrlen;
    int idle, sendalot;
    int maxburst = TCP_MAXBURST;
    struct rmxp_tao *taop;
    struct rmxp_tao tao_noncached;
    int isipv6;
    int last_off;
    int m_off;
    struct mbuf *m_last = 0;
    struct mbuf *m_head = 0;
    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);

    if (isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)) {
        KERNEL_DEBUG(DBG_LAYER_BEG,
            ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
            (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
             (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
            sendalot,0,0);
    } else {
        KERNEL_DEBUG(DBG_LAYER_BEG,
            ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
            (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
             (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
            sendalot,0,0);
    }
    /*
     * Determine length of data that should be transmitted,
     * and flags that will be used.
     * If there is some data or critical controls (SYN, RST)
     * to send, then transmit; otherwise, investigate further.
     */
    idle = (tp->snd_max == tp->snd_una);
#ifdef __APPLE__
    if (idle && tp->t_rcvtime >= tp->t_rxtcur) {
#else
    if (idle && (ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
#endif
        /*
         * We have been idle for "a while" and no acks are
         * expected to clock out any data we send --
         * slow start to get ack "clock" running again.
         *
         * Set the slow-start flight size depending on whether
         * this is a local network or not.
         */
        if ((isipv6 && in6_localaddr(&tp->t_inpcb->in6p_faddr)) ||
            (!isipv6 && in_localaddr(tp->t_inpcb->inp_faddr)))
            tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local;
        else
            tp->snd_cwnd = tp->t_maxseg * ss_fltsz;
    }
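    /*
     * Illustrative numbers: with t_maxseg = 1460 and the conventional
     * ss_fltsz of 1, an idle connection restarts with a one-segment
     * congestion window, while ss_fltsz_local (TCP_MAXWIN above) lets a
     * connection to a directly attached network ramp up much faster.
     */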
again:
    sendalot = 0;
    off = tp->snd_nxt - tp->snd_una;
    win = min(tp->snd_wnd, tp->snd_cwnd);

    flags = tcp_outflags[tp->t_state];
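    /*
     * tcp_outflags[] (brought in from tcp_fsm.h via TCPOUTFLAGS) maps the
     * connection state to the base header flags for this segment --
     * roughly TH_SYN while the connection is being opened, TH_ACK once it
     * is established, and TH_RST|TH_ACK for the closed state.
     */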
    /*
     * Get standard flags, and add SYN or FIN if requested by 'hidden'
     * state flags.
     */
    if (tp->t_flags & TF_NEEDFIN)
        flags |= TH_FIN;
    if (tp->t_flags & TF_NEEDSYN)
        flags |= TH_SYN;
    /*
     * If in persist timeout with window of 0, send 1 byte.
     * Otherwise, if window is small but nonzero
     * and timer expired, we will send what we can
     * and go to transmit state.
     */
    if (tp->t_force) {
        if (win == 0) {
            /*
             * If we still have some data to send, then
             * clear the FIN bit.  Usually this would
             * happen below when it realizes that we
             * aren't sending all the data.  However,
             * if we have exactly 1 byte of unsent data,
             * then it won't clear the FIN bit below,
             * and if we are in persist state, we wind
             * up sending the packet without recording
             * that we sent the FIN bit.
             *
             * We can't just blindly clear the FIN bit,
             * because if we don't have any more data
             * to send then the probe will be the FIN
             * itself.
             */
            if (off < so->so_snd.sb_cc)
                flags &= ~TH_FIN;
            win = 1;
        } else {
            tp->t_timer[TCPT_PERSIST] = 0;
            tp->t_rxtshift = 0;
        }
    }
    len = (long)ulmin(so->so_snd.sb_cc, win) - off;
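    /*
     * Illustrative example: with 10000 bytes queued in so_snd, an
     * effective window of 8760 and off = 2920 bytes already in flight,
     * len = min(10000, 8760) - 2920 = 5840, the most new data the window
     * currently allows us to put on the wire.
     */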
    if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) {
        taop = &tao_noncached;
        bzero(taop, sizeof(*taop));
    }
    /*
     * Lop off SYN bit if it has already been sent.  However, if this
     * is SYN-SENT state and if segment contains data and if we don't
     * know that foreign host supports TAO, suppress sending segment.
     */
    if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
        flags &= ~TH_SYN;
        off--, len++;
        if (len > 0 && tp->t_state == TCPS_SYN_SENT &&
            taop->tao_ccsent == 0) {
            KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
            return 0;
        }
    }
    /*
     * Be careful not to send data and/or FIN on SYN segments
     * in cases when no CC option will be sent.
     * This measure is needed to prevent interoperability problems
     * with not fully conformant TCP implementations.
     */
    if ((flags & TH_SYN) &&
        ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) ||
         ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC)))) {
        len = 0;
        flags &= ~TH_FIN;
    }
    if (len < 0) {
        /*
         * If FIN has been sent but not acked,
         * but we haven't been called to retransmit,
         * len will be -1.  Otherwise, window shrank
         * after we sent into it.  If window shrank to 0,
         * cancel pending retransmit, pull snd_nxt back
         * to (closed) window, and set the persist timer
         * if it isn't already going.  If the window didn't
         * close completely, just wait for an ACK.
         */
        len = 0;
        if (win == 0) {
            tp->t_timer[TCPT_REXMT] = 0;
            tp->t_rxtshift = 0;
            tp->snd_nxt = tp->snd_una;
            if (tp->t_timer[TCPT_PERSIST] == 0)
                tcp_setpersist(tp);
        }
    }
    if (len > tp->t_maxseg) {
        len = tp->t_maxseg;
        sendalot = 1;
    }
    if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
        flags &= ~TH_FIN;

    win = sbspace(&so->so_rcv);
    /*
     * Sender silly window avoidance.  If the connection is idle
     * and we can send all queued data, or a full-sized segment,
     * or at least a default-sized segment, or we are being forced,
     * then send now; otherwise don't bother.
     * If peer's buffer is tiny, then send
     * when window is at least half open.
     * If retransmitting (possibly after persist timer forced us
     * to send into a small window), then must resend.
     */
    if (len) {
        if (len == tp->t_maxseg)
            goto send;
        if (!(tp->t_flags & TF_MORETOCOME) &&
            (idle || tp->t_flags & TF_NODELAY) &&
            (tp->t_flags & TF_NOPUSH) == 0 &&
            len + off >= so->so_snd.sb_cc)
            goto send;
        if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
            goto send;
        if (SEQ_LT(tp->snd_nxt, tp->snd_max))
            goto send;
    }
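    /*
     * For example, a 200-byte write on an otherwise idle connection
     * passes the second test above (idle, TF_NOPUSH clear, and len + off
     * covering everything queued), so the small segment goes out
     * immediately instead of waiting to fill a full-sized one.
     */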
    /*
     * Compare available window to amount of window
     * known to peer (as advertised window less
     * next expected input).  If the difference is at least two
     * max size segments, or at least 50% of the maximum possible
     * window, then want to send a window update to peer.
     */
    if (win > 0) {
        /*
         * "adv" is the amount we can increase the window,
         * taking into account that we are limited by
         * TCP_MAXWIN << tp->rcv_scale.
         */
        long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
            (tp->rcv_adv - tp->rcv_nxt);

        if (adv >= (long) (2 * tp->t_maxseg))
            goto send;
        if (2 * adv >= (long) so->so_rcv.sb_hiwat)
            goto send;
    }
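    /*
     * E.g. with t_maxseg = 1460 a pure window update is sent only once
     * the application has drained at least 2920 bytes (two segments) of
     * previously advertised space, or half of the receive buffer
     * (sb_hiwat); smaller openings wait for the next data or ACK segment,
     * which avoids receiver-side silly window syndrome.
     */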
    /*
     * Send if we owe peer an ACK.
     */
    if (tp->t_flags & TF_ACKNOW)
        goto send;
    if ((flags & TH_RST) ||
        ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
        goto send;
    if (SEQ_GT(tp->snd_up, tp->snd_una))
        goto send;
    /*
     * If our state indicates that FIN should be sent
     * and we have not yet done so, or we're retransmitting the FIN,
     * then we need to send.
     */
    if (flags & TH_FIN &&
        ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
        goto send;
    /*
     * TCP window updates are not reliable, rather a polling protocol
     * using ``persist'' packets is used to insure receipt of window
     * updates.  The three ``states'' for the output side are:
     *	idle			not doing retransmits or persists
     *	persisting		to move a small or zero window
     *	(re)transmitting	and thereby not persisting
     *
     * tp->t_timer[TCPT_PERSIST]
     *	is set when we are in persist state.
     * tp->t_force
     *	is set when we are called to send a persist packet.
     * tp->t_timer[TCPT_REXMT]
     *	is set when we are retransmitting
     * The output side is idle when both timers are zero.
     *
     * If send window is too small, there is data to transmit, and no
     * retransmit or persist is pending, then go to persist state.
     * If nothing happens soon, send when timer expires:
     * if window is nonzero, transmit what we can,
     * otherwise force out a byte.
     */
    if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
        tp->t_timer[TCPT_PERSIST] == 0) {
        tp->t_rxtshift = 0;
        tcp_setpersist(tp);
    }

    /*
     * No reason to send a segment, just return.
     */
    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
    return (0);
send:
    /*
     * Before ESTABLISHED, force sending of initial options
     * unless TCP set not to do any options.
     * NOTE: we assume that the IP/TCP header plus TCP options
     * always fit in a single mbuf, leaving room for a maximum
     * link header, i.e.
     *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
     */
    optlen = 0;
    if (isipv6)
        hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
    else
        hdrlen = sizeof (struct tcpiphdr);
    if (flags & TH_SYN) {
        tp->snd_nxt = tp->iss;
        if ((tp->t_flags & TF_NOOPT) == 0) {
            u_short mss;

            opt[0] = TCPOPT_MAXSEG;
            opt[1] = TCPOLEN_MAXSEG;
            mss = htons((u_short) tcp_mssopt(tp));
            (void)memcpy(opt + 2, &mss, sizeof(mss));
            optlen = TCPOLEN_MAXSEG;

            if ((tp->t_flags & TF_REQ_SCALE) &&
                ((flags & TH_ACK) == 0 ||
                (tp->t_flags & TF_RCVD_SCALE))) {
                *((u_int32_t *)(opt + optlen)) = htonl(
                    TCPOPT_NOP << 24 |
                    TCPOPT_WINDOW << 16 |
                    TCPOLEN_WINDOW << 8 |
                    tp->request_r_scale);
                optlen += 4;
            }
        }
    }
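    /*
     * The word built above packs a leading NOP with the window scale
     * option: with TCPOPT_NOP = 1, TCPOPT_WINDOW = 3 and
     * TCPOLEN_WINDOW = 3, the bytes on the wire are
     * 0x01 0x03 0x03 <shift>, i.e. htonl(0x01030300 | request_r_scale).
     */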
    /*
     * Send a timestamp and echo-reply if this is a SYN and our side
     * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
     * and our peer have sent timestamps in our SYN's.
     */
    if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
        (flags & TH_RST) == 0 &&
        ((flags & TH_ACK) == 0 ||
         (tp->t_flags & TF_RCVD_TSTMP))) {
        u_int32_t *lp = (u_int32_t *)(opt + optlen);

        /* Form timestamp option as shown in appendix A of RFC 1323. */
        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
        *lp++ = htonl(tcp_now);
        *lp   = htonl(tp->ts_recent);
        optlen += TCPOLEN_TSTAMP_APPA;
    }
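    /*
     * TCPOPT_TSTAMP_HDR is the RFC 1323 appendix A layout: two NOPs,
     * kind 8, length 10 (0x0101080a on the wire), followed by the two
     * 32-bit timestamp words, for TCPOLEN_TSTAMP_APPA = 12 bytes total.
     */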
    /*
     * Send `CC-family' options if our side wants to use them (TF_REQ_CC),
     * options are allowed (!TF_NOOPT) and it's not a RST.
     */
    if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
        (flags & TH_RST) == 0) {
        switch (flags & (TH_SYN|TH_ACK)) {
        /*
         * This is a normal ACK, send CC if we received CC before
         * from our peer.
         */
        case TH_ACK:
            if (!(tp->t_flags & TF_RCVD_CC))
                break;
            /*FALLTHROUGH*/

        /*
         * We can only get here in T/TCP's SYN_SENT* state, when
         * we're sending a non-SYN segment without waiting for
         * the ACK of our SYN.  A check above assures that we only
         * do this if our peer understands T/TCP.
         */
        case 0:
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_CC;
            opt[optlen++] = TCPOLEN_CC;
            *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
            optlen += 4;
            break;

        /*
         * This is our initial SYN, check whether we have to use
         * CC or CC.new.
         */
        case TH_SYN:
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = TCPOPT_NOP;
            opt[optlen++] = tp->t_flags & TF_SENDCCNEW ?
                        TCPOPT_CCNEW : TCPOPT_CC;
            opt[optlen++] = TCPOLEN_CC;
            *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
            optlen += 4;
            break;

        /*
         * This is a SYN,ACK; send CC and CC.echo if we received
         * CC from our peer.
         */
        case (TH_SYN|TH_ACK):
            if (tp->t_flags & TF_RCVD_CC) {
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_CC;
                opt[optlen++] = TCPOLEN_CC;
                *(u_int32_t *)&opt[optlen] =
                    htonl(tp->cc_send);
                optlen += 4;

                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_NOP;
                opt[optlen++] = TCPOPT_CCECHO;
                opt[optlen++] = TCPOLEN_CC;
                *(u_int32_t *)&opt[optlen] =
                    htonl(tp->cc_recv);
                optlen += 4;
            }
            break;
        }
    }
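    /*
     * Each CC-family option written above takes 8 bytes of option space:
     * two NOPs for 32-bit alignment plus the 6-byte option itself (kind,
     * length TCPOLEN_CC, 32-bit connection count), which is why optlen
     * advances by four more after the four header bytes are stored.
     */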
    hdrlen += optlen;

    if (isipv6)
        ipoptlen = ip6_optlen(tp->t_inpcb);
    else {
        if (tp->t_inpcb->inp_options) {
            ipoptlen = tp->t_inpcb->inp_options->m_len -
                    offsetof(struct ipoption, ipopt_list);
        } else {
            ipoptlen = 0;
        }
    }
    if (ipsec_bypass == 0)
        ipoptlen += ipsec_hdrsiz_tcp(tp);

    /*
     * Adjust data length if insertion of options will
     * bump the packet length beyond the t_maxopd length.
     * Clear the FIN bit because we cut off the tail of
     * the segment.
     */
    if (len + optlen + ipoptlen > tp->t_maxopd) {
        /*
         * If there is still more to send, don't close the connection.
         */
        flags &= ~TH_FIN;
        len = tp->t_maxopd - optlen - ipoptlen;
        sendalot = 1;
    }
/*#ifdef DIAGNOSTIC*/
#if INET6
    if (max_linkhdr + hdrlen > MCLBYTES)
        panic("tcphdr too big");
#else
    if (max_linkhdr + hdrlen > MHLEN)
        panic("tcphdr too big");
#endif
/*#endif*/
    /*
     * Grab a header mbuf, attaching a copy of data to
     * be transmitted, and initialize the header from
     * the template for sends on this connection.
     */
    if (len) {
        if (tp->t_force && len == 1)
            tcpstat.tcps_sndprobe++;
        else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
            tcpstat.tcps_sndrexmitpack++;
            tcpstat.tcps_sndrexmitbyte += len;
        } else {
            tcpstat.tcps_sndpack++;
            tcpstat.tcps_sndbyte += len;
        }
#ifdef notyet
        if ((m = m_copypack(so->so_snd.sb_mb, off,
            (int)len, max_linkhdr + hdrlen)) == 0) {
            error = ENOBUFS;
            goto out;
        }
        /*
         * m_copypack left space for our hdr; use it.
         */
        m->m_len += hdrlen;
        m->m_data -= hdrlen;
#else
        /*
         * try to use the new interface that allocates all
         * the necessary mbuf hdrs under 1 mbuf lock and
         * avoids rescanning the socket mbuf list if
         * certain conditions are met.  This routine can't
         * be used in the following cases...
         * 1) the protocol headers exceed the capacity of
         * of a single mbuf header's data area (no cluster attached)
         * 2) the length of the data being transmitted plus
         * the protocol headers fits into a single mbuf header's
         * data area (no cluster attached)
         */
        m = NULL;

        if (MHLEN < hdrlen + max_linkhdr) {
            MGETHDR(m, M_DONTWAIT, MT_HEADER);
            if (m == NULL) {
                error = ENOBUFS;
                goto out;
            }
            MCLGET(m, M_DONTWAIT);
            if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                error = ENOBUFS;
                goto out;
            }
            m->m_data += max_linkhdr;
            m->m_len = hdrlen;
        }

        if (len <= MHLEN - hdrlen - max_linkhdr) {
            if (m == NULL) {
                MGETHDR(m, M_DONTWAIT, MT_HEADER);
                if (m == NULL) {
                    error = ENOBUFS;
                    goto out;
                }
                m->m_data += max_linkhdr;
                m->m_len = hdrlen;
            }
            m_copydata(so->so_snd.sb_mb, off, (int) len,
                mtod(m, caddr_t) + hdrlen);
            m->m_len += len;
        } else {
            if (m != NULL) {
                m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
                if (m->m_next == 0) {
                    m_freem(m);
                    error = ENOBUFS;
                    goto out;
                }
            } else {
                /*
                 * determine whether the mbuf pointer and offset passed back
                 * by the 'last' call to m_copym_with_hdrs are still valid...
                 * if the head of the socket chain has changed (due to an
                 * incoming ACK for instance), or the offset into the chain
                 * we just computed is different from the one last returned
                 * by m_copym_with_hdrs (perhaps we're re-transmitting a
                 * packet sent earlier), then we can't pass the mbuf pointer
                 * and offset into it as valid hints for m_copym_with_hdrs to
                 * use (if valid, these hints allow m_copym_with_hdrs to
                 * avoid rescanning from the beginning of the socket buffer
                 * mbuf list).  Setting the mbuf pointer to NULL is
                 * sufficient to disable the hint mechanism.
                 */
                if (m_head != so->so_snd.sb_mb || last_off != off)
                    m_last = NULL;
                last_off = off + len;
                m_head = so->so_snd.sb_mb;

                /*
                 * m_copym_with_hdrs will always return the last mbuf pointer
                 * and the offset into it that it acted on to fulfill the
                 * current request, whether a valid 'hint' was passed in or
                 * not.
                 */
                if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, (int) len,
                    M_DONTWAIT, &m_last, &m_off)) == NULL) {
                    error = ENOBUFS;
                    goto out;
                }
                m->m_data += max_linkhdr;
                m->m_len = hdrlen;
            }
        }
#endif /* notyet */
        /*
         * If we're sending everything we've got, set PUSH.
         * (This will keep happy those implementations which only
         * give data to the user when a buffer fills or
         * a PUSH comes in.)
         */
        if (off + len == so->so_snd.sb_cc)
            flags |= TH_PUSH;
    } else {
        if (tp->t_flags & TF_ACKNOW)
            tcpstat.tcps_sndacks++;
        else if (flags & (TH_SYN|TH_FIN|TH_RST))
            tcpstat.tcps_sndctrl++;
        else if (SEQ_GT(tp->snd_up, tp->snd_una))
            tcpstat.tcps_sndurg++;
        else
            tcpstat.tcps_sndwinup++;

        MGETHDR(m, M_DONTWAIT, MT_HEADER);
        if (m == NULL) {
            error = ENOBUFS;
            goto out;
        }
        if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
            MHLEN >= hdrlen) {
            MH_ALIGN(m, hdrlen);
        } else
            m->m_data += max_linkhdr;
        m->m_len = hdrlen;
    }
    m->m_pkthdr.rcvif = (struct ifnet *)0;
    if (isipv6) {
        ip6 = mtod(m, struct ip6_hdr *);
        th = (struct tcphdr *)(ip6 + 1);
        tcp_fillheaders(tp, ip6, th);
    } else {
        ip = mtod(m, struct ip *);
        ipov = (struct ipovly *)ip;
        th = (struct tcphdr *)(ip + 1);
        /* this picks up the pseudo header (w/o the length) */
        tcp_fillheaders(tp, ip, th);
    }
    /*
     * Fill in fields, remembering maximum advertised
     * window for use in delaying messages about window sizes.
     * If resending a FIN, be sure not to use a new sequence number.
     */
    if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
        tp->snd_nxt == tp->snd_max)
        tp->snd_nxt--;
    /*
     * If we are doing retransmissions, then snd_nxt will
     * not reflect the first unsent octet.  For ACK only
     * packets, we do not want the sequence number of the
     * retransmitted packet, we want the sequence number
     * of the next unsent octet.  So, if there is no data
     * (and no SYN or FIN), use snd_max instead of snd_nxt
     * when filling in ti_seq.  But if we are in persist
     * state, snd_max might reflect one byte beyond the
     * right edge of the window, so use snd_nxt in that
     * case, since we know we aren't doing a retransmission.
     * (retransmit and persist are mutually exclusive...)
     */
    if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST])
        th->th_seq = htonl(tp->snd_nxt);
    else
        th->th_seq = htonl(tp->snd_max);
    th->th_ack = htonl(tp->rcv_nxt);
    if (optlen) {
        bcopy(opt, th + 1, optlen);
        th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
    }
    th->th_flags = flags;
    /*
     * Calculate receive window.  Don't shrink window,
     * but avoid silly window syndrome.
     */
    if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
        win = 0;
    if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
        win = (long)(tp->rcv_adv - tp->rcv_nxt);
    if (win > (long)TCP_MAXWIN << tp->rcv_scale)
        win = (long)TCP_MAXWIN << tp->rcv_scale;
    th->th_win = htons((u_short) (win >> tp->rcv_scale));
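    /*
     * The advertised value is the window right-shifted by rcv_scale
     * (RFC 1323 window scaling).  For example, win = 131072 with
     * rcv_scale = 2 goes out as th_win = 32768; the peer multiplies by
     * 1 << 2 to recover the 128 KB window.
     */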
    if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
        th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
        th->th_flags |= TH_URG;
    } else
        /*
         * If no urgent pointer to send, then we pull
         * the urgent pointer to the left edge of the send window
         * so that it doesn't drift into the send window on sequence
         * number wraparound.
         */
        tp->snd_up = tp->snd_una;	/* drag it along */
    /*
     * Put TCP length in extended header, and then
     * checksum extended header and data.
     */
    m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
    if (isipv6) {
        /*
         * ip6_plen does not need to be filled in now; it will be
         * filled in within ip6_output.
         */
        th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
            sizeof(struct tcphdr) + optlen + len);
    } else {
        m->m_pkthdr.csum_flags = CSUM_TCP;
        m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
        if (len + optlen)
            th->th_sum = in_addword(th->th_sum,
                htons((u_short)(optlen + len)));
    }
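    /*
     * In the IPv4 case the checksum is finished later: csum_flags and
     * csum_data ask the driver (or the ip_output fallback) to complete
     * the TCP checksum, while th_sum currently holds the pseudo-header
     * sum set up by tcp_fillheaders(), with the option and payload byte
     * count folded in just above.
     */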
    /* IP version must be set here for ipv4/ipv6 checking later */
    KASSERT(ip->ip_v == IPVERSION,
        ("%s: IP version incorrect: %d", __FUNCTION__, ip->ip_v));
    /*
     * In transmit state, time the transmission and arrange for
     * the retransmit.  In persist state, just set snd_max.
     */
    if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) {
        tcp_seq startseq = tp->snd_nxt;

        /*
         * Advance snd_nxt over sequence space of this segment.
         */
        if (flags & (TH_SYN|TH_FIN)) {
            if (flags & TH_SYN)
                tp->snd_nxt++;
            if (flags & TH_FIN) {
                tp->snd_nxt++;
                tp->t_flags |= TF_SENTFIN;
            }
        }
        tp->snd_nxt += len;
        if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
            tp->snd_max = tp->snd_nxt;
            /*
             * Time this transmission if not a retransmission and
             * not currently timing anything.
             */
            if (tp->t_rtttime == 0) {
                tp->t_rtseq = startseq;
                tcpstat.tcps_segstimed++;
            }
        }

        /*
         * Set retransmit timer if not currently set,
         * and not doing an ack or a keep-alive probe.
         * Initial value for retransmit timer is smoothed
         * round-trip time + 2 * round-trip time variance.
         * Initialize shift counter which is used for backoff
         * of retransmit time.
         */
        if (tp->t_timer[TCPT_REXMT] == 0 &&
            tp->snd_nxt != tp->snd_una) {
            tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
            if (tp->t_timer[TCPT_PERSIST]) {
                tp->t_timer[TCPT_PERSIST] = 0;
                tp->t_rxtshift = 0;
            }
        }
    } else {
        if (SEQ_GT(tp->snd_nxt + len, tp->snd_max))
            tp->snd_max = tp->snd_nxt + len;
    }
    if (so->so_options & SO_DEBUG)
        tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
    /*
     * Fill in IP length and desired time to live and
     * send to IP level.  There should be a better way
     * to handle ttl and tos; we could keep them in
     * the template, but need a way to checksum without them.
     */
    /*
     * m->m_pkthdr.len should have been set before checksum calculation,
     * because in6_cksum() needs it.
     */
    if (isipv6) {
        /*
         * we separately set hoplimit for every segment, since the
         * user might want to change the value via setsockopt.
         * Also, desired default hop limit might be changed via
         * Neighbor Discovery.
         */
        ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb,
                        tp->t_inpcb->in6p_route.ro_rt ?
                        tp->t_inpcb->in6p_route.ro_rt->rt_ifp
                        : NULL);

        /* TODO: IPv6 IP6TOS_ECT bit on */
        if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
            m_freem(m);
            error = ENOBUFS;
            goto out;
        }
        error = ip6_output(m,
                tp->t_inpcb->in6p_outputopts,
                &tp->t_inpcb->in6p_route,
                (so->so_options & SO_DONTROUTE), NULL, NULL);
    } else {
        struct rtentry *rt;

        ip->ip_len = m->m_pkthdr.len;
        if (INP_CHECK_SOCKAF(so, AF_INET6))
            ip->ip_ttl = in6_selecthlim(tp->t_inpcb,
                            tp->t_inpcb->in6p_route.ro_rt ?
                            tp->t_inpcb->in6p_route.ro_rt->rt_ifp
                            : NULL);
        else
            ip->ip_ttl = tp->t_inpcb->inp_ip_ttl;	/* XXX */
        ip->ip_tos = tp->t_inpcb->inp_ip_tos;	/* XXX */
        if (isipv6) {
            KERNEL_DEBUG(DBG_LAYER_BEG,
                ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
                (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
                 (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
                sendalot,0,0);
        } else {
            KERNEL_DEBUG(DBG_LAYER_BEG,
                ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
                (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
                 (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
                sendalot,0,0);
        }
        /*
         * See if we should do MTU discovery.  We do it only if the
         * following are true:
         *	1) we have a valid route to the destination
         *	2) the MTU is not locked (if it is, then discovery
         *	   has been disabled for that route)
         */
        if (path_mtu_discovery && (rt = tp->t_inpcb->inp_route.ro_rt)
            && rt->rt_flags & RTF_UP
            && !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
            ip->ip_off |= IP_DF;
        }
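        /*
         * Setting IP_DF ("don't fragment") is what makes path MTU
         * discovery work: a router with a smaller MTU has to drop the
         * packet and return an ICMP "fragmentation needed" message, and
         * that (like the local EMSGSIZE case handled below) ends up in
         * tcp_mtudisc(), which shrinks the segment size and retransmits.
         */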
        if (ipsec_bypass == 0)
            ipsec_setsocket(m, so);
        error = ip_output(m, tp->t_inpcb->inp_options,
            &tp->t_inpcb->inp_route,
            (so->so_options & SO_DONTROUTE), 0);
    }
    if (error) {
        /*
         * We know that the packet was lost, so back out the
         * sequence number advance, if any.
         */
        if (tp->t_force == 0 || !tp->t_timer[TCPT_PERSIST]) {
            /*
             * No need to check for TH_FIN here because
             * the TF_SENTFIN flag handles that case.
             */
            if ((flags & TH_SYN) == 0)
                tp->snd_nxt -= len;
        }
out:
        if (error == ENOBUFS) {
            if (!tp->t_timer[TCPT_REXMT] &&
                !tp->t_timer[TCPT_PERSIST])
                tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
            tcp_quench(tp->t_inpcb, 0);
            KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
            return (0);
        }
        if (error == EMSGSIZE) {
            /*
             * ip_output() will have already fixed the route
             * for us.  tcp_mtudisc() will, as its last action,
             * initiate retransmission, so it is important to
             * not do so here.
             */
            tcp_mtudisc(tp->t_inpcb, 0);
            KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
            return (0);
        }
        if ((error == EHOSTUNREACH || error == ENETDOWN)
            && TCPS_HAVERCVDSYN(tp->t_state)) {
            tp->t_softerror = error;
            KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
            return (0);
        }
        KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
        return (error);
    }
    tcpstat.tcps_sndtotal++;

    /*
     * Data sent (as far as we can tell).
     * If this advertises a larger window than any other segment,
     * then remember the size of the advertised window.
     * Any pending ACK has now been sent.
     */
    if (win > 0 && SEQ_GT(tp->rcv_nxt + win, tp->rcv_adv))
        tp->rcv_adv = tp->rcv_nxt + win;
    tp->last_ack_sent = tp->rcv_nxt;
    tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);

    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
    if (sendalot && (!tcp_do_newreno || --maxburst))
        goto again;
    return (0);
}
void
tcp_setpersist(tp)
    register struct tcpcb *tp;
{
    int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;

    if (tp->t_timer[TCPT_REXMT])
        panic("tcp_setpersist: retransmit pending");
    /*
     * Start/restart persistence timer.
     */
    TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
        t * tcp_backoff[tp->t_rxtshift],
        TCPTV_PERSMIN, TCPTV_PERSMAX);
    if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
        tp->t_rxtshift++;
}
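/*
 * The probe interval grows exponentially: t is roughly the smoothed RTT
 * plus variance in timer ticks, tcp_backoff[] scales it up on successive
 * unanswered probes (1, 2, 4, 8, ... capped at 64), and TCPT_RANGESET
 * clamps the result between TCPTV_PERSMIN and TCPTV_PERSMAX
 * (conventionally 5 and 60 seconds).
 */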