/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>

#include <sys/kdebug.h>

#include <netinet6/ipsec.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 3)
#define DBG_FNC_TCP_OUTPUT	NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
extern struct mbuf *m_copypack();

static int path_mtu_discovery = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
	&path_mtu_discovery, 1, "Enable Path MTU Discovery");

int ss_fltsz = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW,
	&ss_fltsz, 1, "Slow start flight size");

int ss_fltsz_local = 4;		/* starts with four segments max */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW,
	&ss_fltsz_local, 1, "Slow start flight size for local networks");

int tcp_do_newreno = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW, &tcp_do_newreno,
	0, "Enable NewReno Algorithms");

struct mbuf *m_copym_with_hdrs __P((struct mbuf *, int, int, int, struct mbuf **, int *));

/* temporary: for testing */
extern int ipsec_bypass;

extern int slowlink_wsize;	/* window correction for slow links */
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(tp)
	register struct tcpcb *tp;
{
	register struct socket *so = tp->t_inpcb->inp_socket;
	register long len, win;
	int off, flags, error;
	register struct mbuf *m;
	struct ip *ip = NULL;
	register struct ipovly *ipov = NULL;
	struct ip6_hdr *ip6 = NULL;
	register struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
	int idle, sendalot;
	int isipv6;
	int maxburst = TCP_MAXBURST;
	struct rmxp_tao *taop;
	struct rmxp_tao tao_noncached;
	struct rtentry *rt;
	int last_off, m_off;
	struct mbuf *m_last = 0;
	struct mbuf *m_head = 0;

	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
	if (isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)) {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		     ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
		     (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
		      (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
		     0,0,0);
	} else {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		     ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
		     (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
		      (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
		     0,0,0);
	}
	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->snd_max == tp->snd_una);
#ifdef __APPLE__
	if (idle && tp->t_rcvtime >= tp->t_rxtcur) {
#else
	if (idle && (ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
#endif
		/*
		 * We have been idle for "a while" and no acks are
		 * expected to clock out any data we send --
		 * slow start to get ack "clock" running again.
		 *
		 * Set the slow-start flight size depending on whether
		 * this is a local network or not.
		 */
		if ((isipv6 && in6_localaddr(&tp->t_inpcb->in6p_faddr)) ||
		    (!isipv6 && in_localaddr(tp->t_inpcb->inp_faddr)))
			tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local;
		else
			tp->snd_cwnd = tp->t_maxseg * ss_fltsz;
	}
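	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): how the restart congestion window above works out in
	 * bytes.  The helper below is hypothetical and only mirrors the
	 * two assignments in the block above; the numbers assume a
	 * 1460-byte t_maxseg and the default ss_fltsz_local of 4 segments.
	 */
#if 0
	static unsigned long
	example_restart_cwnd(unsigned long maxseg, int peer_is_local,
	    unsigned long fltsz, unsigned long fltsz_local)
	{
		/* e.g. maxseg = 1460, fltsz_local = 4: a local peer restarts
		 * with a 5840-byte congestion window; a remote peer restarts
		 * with fltsz * 1460 bytes. */
		return maxseg * (peer_is_local ? fltsz_local : fltsz);
	}
#endif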
again:
	sendalot = 0;
	off = tp->snd_nxt - tp->snd_una;
	win = min(tp->snd_wnd, tp->snd_cwnd);
	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
		win = min(win, slowlink_wsize);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_force) {
		if (win == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.sb_cc)
				flags &= ~TH_FIN;
			win = 1;
		} else {
			tp->t_timer[TCPT_PERSIST] = 0;
			tp->t_rxtshift = 0;
		}
	}

	len = (long)ulmin(so->so_snd.sb_cc, win) - off;
	if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) {
		taop = &tao_noncached;
		bzero(taop, sizeof(*taop));
	}
	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT &&
		    taop->tao_ccsent == 0) {
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return 0;
		}
	}
	/*
	 * Be careful not to send data and/or FIN on SYN segments
	 * in cases when no CC option will be sent.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) &&
	    ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) ||
	     ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC)))) {
		len = 0;
		flags &= ~TH_FIN;
	}
	if (len < 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be -1.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			tp->t_timer[TCPT_REXMT] = 0;
			tp->snd_nxt = tp->snd_una;
			if (tp->t_timer[TCPT_PERSIST] == 0)
				tcp_setpersist(tp);
		}
	}
	if (len > tp->t_maxseg) {
		len = tp->t_maxseg;
		sendalot = 1;
	}
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
		flags &= ~TH_FIN;
	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)	/* Clips window size for slow links */
		win = min(sbspace(&so->so_rcv), slowlink_wsize);
	else
		win = sbspace(&so->so_rcv);
	/*
	 * Sender silly window avoidance.  If connection is idle
	 * and can send all data, a maximum segment,
	 * at least a maximum default-size segment do it,
	 * or are forced, do it; otherwise don't bother.
	 * If peer's buffer is tiny, then send
	 * when window is at least half open.
	 * If retransmitting (possibly after persist timer forced us
	 * to send into a small window), then must resend.
	 */
	if (len) {
		if (len == tp->t_maxseg)
			goto send;
		if (!(tp->t_flags & TF_MORETOCOME) &&
		    (idle || tp->t_flags & TF_NODELAY) &&
		    (tp->t_flags & TF_NOPUSH) == 0 &&
		    len + off >= so->so_snd.sb_cc)
			goto send;
		if (tp->t_force)
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
			goto send;
	}
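	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): the sender-side silly window avoidance test above,
	 * restated as a standalone predicate.  The struct and field names
	 * below are simplified stand-ins for the tcpcb/sockbuf members
	 * used by the real code.
	 */
#if 0
	struct sws_example {
		long len;		/* candidate payload length */
		long off;		/* offset of snd_nxt into the send buffer */
		long sb_cc;		/* bytes queued in the send buffer */
		long maxseg;		/* full-sized segment */
		long max_sndwnd;	/* largest window the peer has offered */
		int idle, nodelay, nopush, moretocome, forced, retransmitting;
	};

	static int
	example_should_send(struct sws_example *s)
	{
		if (s->len == s->maxseg)			/* full segment */
			return 1;
		if (!s->moretocome && (s->idle || s->nodelay) &&
		    !s->nopush && s->off + s->len >= s->sb_cc)	/* all we have */
			return 1;
		if (s->forced)					/* window probe */
			return 1;
		if (s->max_sndwnd > 0 && s->len >= s->max_sndwnd / 2)	/* tiny peer buffer */
			return 1;
		if (s->retransmitting)				/* must resend */
			return 1;
		return 0;
	}
#endif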
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 50% of the maximum possible
	 * window, then want to send a window update to peer.
	 */
	if (win > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);

		if (adv >= (long) (2 * tp->t_maxseg))
			goto send;
		if (2 * adv >= (long) so->so_rcv.sb_hiwat)
			goto send;
	}
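	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): the window-update thresholds above with concrete numbers.
	 * Assumes t_maxseg = 1460 and a 32 KB receive buffer
	 * (so_rcv.sb_hiwat = 32768); both are assumptions for the example.
	 */
#if 0
	static int
	example_window_update_due(long adv)
	{
		long maxseg = 1460;
		long sb_hiwat = 32768;

		/* update if we can open the window by two full segments
		 * (2920 bytes here) or by half the receive buffer (16384). */
		return (adv >= 2 * maxseg) || (2 * adv >= sb_hiwat);
	}
#endif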
	/*
	 * Send if we owe peer an ACK.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, or we're retransmitting the FIN,
	 * then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to insure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tp->t_timer[TCPT_PERSIST]
	 *	is set when we are in persist state.
	 * tp->t_force
	 *	is set when we are called to send a persist packet.
	 * tp->t_timer[TCPT_REXMT]
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
	    tp->t_timer[TCPT_PERSIST] == 0) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}
	/*
	 * No reason to send a segment, just return.
	 */
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	return (0);
send:
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
		hdrlen = sizeof (struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if ((tp->t_flags & TF_NOOPT) == 0) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			(void)memcpy(opt + 2, &mss, sizeof(mss));
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    ((flags & TH_ACK) == 0 ||
			    (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}
		}
	}
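	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): the option bytes the SYN path above produces.  With an
	 * advertised MSS of 1460 and a requested window shift of 3, opt[]
	 * holds, in order:
	 *
	 *	02 04 05 b4	MSS option: kind 2, len 4, value 0x05b4 (1460)
	 *	01 03 03 03	NOP pad, window scale: kind 3, len 3, shift 3
	 *
	 * The window-scale bytes are built as one 32-bit big-endian value,
	 * which is why they are packed with htonl() above.
	 */
#if 0
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

	static unsigned
	example_fill_syn_options(uint8_t opt[8], uint16_t mss, uint8_t shift)
	{
		uint32_t ws;

		opt[0] = 2;			/* TCPOPT_MAXSEG */
		opt[1] = 4;			/* TCPOLEN_MAXSEG */
		mss = htons(mss);
		memcpy(opt + 2, &mss, sizeof(mss));

		/* NOP(1) | WINDOW(3) | TCPOLEN_WINDOW(3) | shift, one word */
		ws = htonl(1 << 24 | 3 << 16 | 3 << 8 | shift);
		memcpy(opt + 4, &ws, sizeof(ws));
		return 8;			/* resulting optlen */
	}
#endif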
	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYN's.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (flags & TH_RST) == 0 &&
	    ((flags & TH_ACK) == 0 ||
	     (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(tcp_now);
		*lp   = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}
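	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): the 12 bytes the RFC 1323 "appendix A" layout above
	 * appends.  TCPOPT_TSTAMP_HDR is the constant 0x0101080a, i.e.
	 * NOP, NOP, kind 8 (timestamp), length 10, followed by the 4-byte
	 * TSval (tcp_now) and 4-byte TSecr (ts_recent), for
	 * TCPOLEN_TSTAMP_APPA = 12 bytes in total.
	 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

	static unsigned
	example_fill_tstamp_option(uint32_t *lp, uint32_t tsval, uint32_t tsecr)
	{
		*lp++ = htonl(0x0101080a);	/* NOP, NOP, TIMESTAMP, len 10 */
		*lp++ = htonl(tsval);		/* our timestamp clock */
		*lp   = htonl(tsecr);		/* most recent peer timestamp */
		return 12;			/* bytes added to optlen */
	}
#endif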
	/*
	 * Send `CC-family' options if our side wants to use them (TF_REQ_CC),
	 * options are allowed (!TF_NOOPT) and it's not a RST.
	 */
	if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
	    (flags & TH_RST) == 0) {
		switch (flags & (TH_SYN|TH_ACK)) {
		/*
		 * This is a normal ACK, send CC if we received CC before
		 * from our peer.
		 */
		case TH_ACK:
			if (!(tp->t_flags & TF_RCVD_CC))
				break;
			/*FALLTHROUGH*/

		/*
		 * We can only get here in T/TCP's SYN_SENT* state, when
		 * we're sending a non-SYN segment without waiting for
		 * the ACK of our SYN.  A check above assures that we only
		 * do this if our peer understands T/TCP.
		 */
		case 0:
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_CC;
			opt[optlen++] = TCPOLEN_CC;
			*(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
			optlen += 4;
			break;

		/*
		 * This is our initial SYN, check whether we have to use
		 * CC or CC.new.
		 */
		case TH_SYN:
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = tp->t_flags & TF_SENDCCNEW ?
						TCPOPT_CCNEW : TCPOPT_CC;
			opt[optlen++] = TCPOLEN_CC;
			*(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
			optlen += 4;
			break;

		/*
		 * This is a SYN,ACK; send CC and CC.echo if we received
		 * CC from our peer.
		 */
		case (TH_SYN|TH_ACK):
			if (tp->t_flags & TF_RCVD_CC) {
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_CC;
				opt[optlen++] = TCPOLEN_CC;
				*(u_int32_t *)&opt[optlen] =
					htonl(tp->cc_send);
				optlen += 4;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_CCECHO;
				opt[optlen++] = TCPOLEN_CC;
				*(u_int32_t *)&opt[optlen] =
					htonl(tp->cc_recv);
				optlen += 4;
			}
			break;
		}
	}
	hdrlen += optlen;

	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else if (tp->t_inpcb->inp_options) {
		ipoptlen = tp->t_inpcb->inp_options->m_len -
				offsetof(struct ipoption, ipopt_list);
	} else {
		ipoptlen = 0;
	}
	if (ipsec_bypass == 0)
		ipoptlen += ipsec_hdrsiz_tcp(tp);
	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxopd length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		/*
		 * If there is still more to send, don't close the connection.
		 */
		flags &= ~TH_FIN;
		len = tp->t_maxopd - optlen - ipoptlen;
		sendalot = 1;
	}
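	/*
	 * Illustrative arithmetic (not part of the original source, kept
	 * under #if 0): with t_maxopd = 1460 and 12 bytes of TCP options
	 * (timestamps), a request to send 1460 bytes of data is clamped to
	 * 1460 - 12 = 1448 bytes and the remainder is left for a follow-on
	 * segment.  The numbers are assumptions for the example.
	 */
#if 0
	static long
	example_clamp_len(long len, long t_maxopd, long optlen, long ipoptlen)
	{
		if (len + optlen + ipoptlen > t_maxopd)
			len = t_maxopd - optlen - ipoptlen;	/* 1448 in the case above */
		return len;
	}
#endif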
/*#ifdef DIAGNOSTIC*/
#if INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
		panic("tcphdr too big");
#else
	if (max_linkhdr + hdrlen > MHLEN)
		panic("tcphdr too big");
#endif
/*#endif*/
	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		if (tp->t_force && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;
		}
#ifdef notyet
		if ((m = m_copypack(so->so_snd.sb_mb, off,
		    (int)len, max_linkhdr + hdrlen)) == 0) {
			error = ENOBUFS;
			goto out;
		}
		/*
		 * m_copypack left space for our hdr; use it.
		 */
		m->m_len += hdrlen;
		m->m_data -= hdrlen;
#else
		/*
		 * try to use the new interface that allocates all
		 * the necessary mbuf hdrs under 1 mbuf lock and
		 * avoids rescanning the socket mbuf list if
		 * certain conditions are met.  This routine can't
		 * be used in the following cases...
		 * 1) the protocol headers exceed the capacity of
		 *    a single mbuf header's data area (no cluster attached)
		 * 2) the length of the data being transmitted plus
		 *    the protocol headers fits into a single mbuf header's
		 *    data area (no cluster attached)
		 */
		if (MHLEN < hdrlen + max_linkhdr) {
			MGETHDR(m, M_DONTWAIT, MT_HEADER);
			if (m == NULL) {
				error = ENOBUFS;
				goto out;
			}
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				error = ENOBUFS;
				goto out;
			}
			m->m_data += max_linkhdr;
			m->m_len = hdrlen;
		} else
			m = NULL;
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			if (m == NULL) {
				MGETHDR(m, M_DONTWAIT, MT_HEADER);
				if (m == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
			m_copydata(so->so_snd.sb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			if (m != NULL) {
				m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
				if (m->m_next == 0) {
					(void) m_free(m);
					error = ENOBUFS;
					goto out;
				}
			} else {
				/*
				 * determine whether the mbuf pointer and offset passed back by the 'last' call
				 * to m_copym_with_hdrs are still valid... if the head of the socket chain has
				 * changed (due to an incoming ACK for instance), or the offset into the chain we
				 * just computed is different from the one last returned by m_copym_with_hdrs (perhaps
				 * we're re-transmitting a packet sent earlier), then we can't pass the mbuf pointer and
				 * offset into it as valid hints for m_copym_with_hdrs to use (if valid, these hints allow
				 * m_copym_with_hdrs to avoid rescanning from the beginning of the socket buffer mbuf list).
				 * setting the mbuf pointer to NULL is sufficient to disable the hint mechanism.
				 */
				if (m_head != so->so_snd.sb_mb || last_off != off)
					m_last = NULL;
				last_off = off + len;
				m_head = so->so_snd.sb_mb;

				/*
				 * m_copym_with_hdrs will always return the last mbuf pointer and the offset into it
				 * that it acted on to fulfill the current request, whether a valid 'hint' was passed
				 * in or not.
				 */
				if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, (int) len,
				    M_DONTWAIT, &m_last, &m_off)) == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
		}
#endif
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.sb_cc)
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen)
			MH_ALIGN(m, hdrlen);
		else
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	m->m_pkthdr.rcvif = (struct ifnet *)0;
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th);
	} else {
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th);
	}
	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST])
		th->th_seq = htonl(tp->snd_nxt);
	else
		th->th_seq = htonl(tp->snd_max);
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	th->th_flags = flags;
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 */
	if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
		win = 0;
	if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
		win = (long)(tp->rcv_adv - tp->rcv_nxt);
	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
		if (win > (long)slowlink_wsize)
			win = slowlink_wsize;
		th->th_win = htons((u_short) (win>>tp->rcv_scale));
	} else {
		if (win > (long)TCP_MAXWIN << tp->rcv_scale)
			win = (long)TCP_MAXWIN << tp->rcv_scale;
		th->th_win = htons((u_short) (win>>tp->rcv_scale));
	}
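	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): the effect of the receive window scaling above.  With
	 * rcv_scale = 2 and a computed window of 65000 bytes, th_win
	 * carries 65000 >> 2 = 16250; the receiver of this segment
	 * multiplies it back up by 1 << 2.  Without scaling the advertised
	 * window is capped at TCP_MAXWIN (65535).
	 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

	static uint16_t
	example_advertised_window(long win, unsigned rcv_scale)
	{
		long maxwin = 65535L << rcv_scale;	/* TCP_MAXWIN << rcv_scale */

		if (win > maxwin)
			win = maxwin;
		return htons((uint16_t)(win >> rcv_scale));
	}
#endif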
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;		/* drag it along */
	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled now, and will be
		 * filled in ip6_output.
		 */
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
				       sizeof(struct tcphdr) + optlen + len);
	} else {
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		if (len + optlen)
			th->th_sum = in_addword(th->th_sum,
				htons((u_short)(optlen + len)));
	}
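	/*
	 * Illustrative sketch (not part of the original source, kept under
	 * #if 0): what the in_addword() call above does.  tcp_fillheaders()
	 * left a pseudo-header sum in th_sum that does not include the TCP
	 * length (see the comment in the header-fill path above), so the
	 * length (optlen + len) is folded in with a ones-complement add
	 * before the hardware or ip_output() finishes the checksum.
	 */
#if 0
#include <stdint.h>

	static uint16_t
	example_ones_complement_add(uint16_t a, uint16_t b)
	{
		uint32_t sum = (uint32_t)a + (uint32_t)b;

		/* fold the carry back into the low 16 bits */
		return (uint16_t)((sum & 0xffff) + (sum >> 16));
	}
#endif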
	/* IP version must be set here for ipv4/ipv6 checking later */
	KASSERT(ip->ip_v == IPVERSION,
		("%s: IP version incorrect: %d", __FUNCTION__, ip->ip_v));
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;
			}
		}
		/*
		 * Set retransmit timer if not currently set,
		 * and not doing an ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
		if (tp->t_timer[TCPT_REXMT] == 0 &&
		    tp->snd_nxt != tp->snd_una) {
			tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
			if (tp->t_timer[TCPT_PERSIST]) {
				tp->t_timer[TCPT_PERSIST] = 0;
				tp->t_rxtshift = 0;
			}
		}
	} else
		if (SEQ_GT(tp->snd_nxt + len, tp->snd_max))
			tp->snd_max = tp->snd_nxt + len;
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb,
					       tp->t_inpcb->in6p_route.ro_rt ?
					       tp->t_inpcb->in6p_route.ro_rt->rt_ifp
					       : NULL);

		/* TODO: IPv6 IP6TOS_ECT bit on */
		if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
		error = ip6_output(m,
			    tp->t_inpcb->in6p_outputopts,
			    &tp->t_inpcb->in6p_route,
			    (so->so_options & SO_DONTROUTE), NULL, NULL);
	} else {
		ip->ip_len = m->m_pkthdr.len;
		if (INP_CHECK_SOCKAF(so, AF_INET6))
			ip->ip_ttl = in6_selecthlim(tp->t_inpcb,
						    tp->t_inpcb->in6p_route.ro_rt ?
						    tp->t_inpcb->in6p_route.ro_rt->rt_ifp
						    : NULL);
		else
			ip->ip_ttl = tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = tp->t_inpcb->inp_ip_tos;	/* XXX */
		if (isipv6)
			KERNEL_DEBUG(DBG_LAYER_BEG,
			     ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
			     (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
			      (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
			     0,0,0);
		else
			KERNEL_DEBUG(DBG_LAYER_BEG,
			     ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
			     (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
			      (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
			     0,0,0);
		/*
		 * See if we should do MTU discovery.  We do it only if the following
		 * are true:
		 *	1) we have a valid route to the destination
		 *	2) the MTU is not locked (if it is, then discovery has been
		 *	   disabled for that route)
		 */
		if (path_mtu_discovery
		    && (rt = tp->t_inpcb->inp_route.ro_rt)
		    && rt->rt_flags & RTF_UP
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
			ip->ip_off |= IP_DF;
		}
		if (ipsec_bypass == 0)
			ipsec_setsocket(m, so);
		error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
		    (so->so_options & SO_DONTROUTE), 0);
	}
	if (error) {
		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 */
		if (tp->t_force == 0 || !tp->t_timer[TCPT_PERSIST]) {
			/*
			 * No need to check for TH_FIN here because
			 * the TF_SENTFIN flag handles that case.
			 */
			if ((flags & TH_SYN) == 0)
				tp->snd_nxt -= len;
		}
out:
		if (error == ENOBUFS) {
			if (!tp->t_timer[TCPT_REXMT] &&
			    !tp->t_timer[TCPT_PERSIST])
				tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
			tcp_quench(tp->t_inpcb, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return (0);
		}
		if (error == EMSGSIZE) {
			/*
			 * ip_output() will have already fixed the route
			 * for us.  tcp_mtudisc() will, as its last action,
			 * initiate retransmission, so it is important to
			 * not do so here.
			 */
			tcp_mtudisc(tp->t_inpcb, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return 0;
		}
		if ((error == EHOSTUNREACH || error == ENETDOWN)
		    && TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return (0);
		}
		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
		return (error);
	}
	tcpstat.tcps_sndtotal++;

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + win;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);

	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	if (sendalot && (!tcp_do_newreno || --maxburst))
		goto again;
	return (0);
}
void
tcp_setpersist(tp)
	register struct tcpcb *tp;
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;

	if (tp->t_timer[TCPT_REXMT])
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
	    t * tcp_backoff[tp->t_rxtshift],
	    TCPTV_PERSMIN, TCPTV_PERSMAX);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}
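/*
 * Illustrative sketch (not part of the original source, kept under #if 0):
 * the persist interval computed above.  t_srtt is kept scaled by 8 and
 * t_rttvar by 4, so ((t_srtt >> 2) + t_rttvar) >> 1 works out to roughly
 * srtt + 2 * rttvar in timer ticks; that base value is then multiplied by
 * the exponential backoff entry for t_rxtshift and clamped by
 * TCPT_RANGESET() to the [TCPTV_PERSMIN, TCPTV_PERSMAX] range.  The backoff
 * table and clamp parameters below are assumptions, shown only to make the
 * shape of the computation concrete.
 */
#if 0
static int
example_persist_timeout(int srtt_scaled8, int rttvar_scaled4,
    int rxtshift, int persmin, int persmax)
{
	static const int backoff[] = { 1, 2, 4, 8, 16, 32, 64, 64, 64 };
	int t = ((srtt_scaled8 >> 2) + rttvar_scaled4) >> 1;
	int timeout = t * backoff[rxtshift];

	if (timeout < persmin)
		timeout = persmin;
	if (timeout > persmax)
		timeout = persmax;
	return timeout;
}
#endif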