/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>

#include <sys/kdebug.h>

#include <netinet6/ipsec.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 3)
#define DBG_FNC_TCP_OUTPUT	NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
extern struct mbuf *m_copypack();

static int path_mtu_discovery = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
	&path_mtu_discovery, 1, "Enable Path MTU Discovery");

int ss_fltsz = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW,
	&ss_fltsz, 1, "Slow start flight size");

int ss_fltsz_local = 4;	/* starts with four segments max */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW,
	&ss_fltsz_local, 1, "Slow start flight size for local networks");

int tcp_do_newreno = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW, &tcp_do_newreno,
	0, "Enable NewReno Algorithms");

struct mbuf *m_copym_with_hdrs __P((struct mbuf *, int, int, int,
	struct mbuf **, int *));

/* temporary: for testing */
extern int ipsec_bypass;

extern int slowlink_wsize;	/* window correction for slow links */
extern u_long route_generation;
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(tp)
	register struct tcpcb *tp;
{
	register struct socket *so = tp->t_inpcb->inp_socket;
	register long len, win;
	int off, flags, error;
	register struct mbuf *m;
	struct ip *ip = NULL;
	register struct ipovly *ipov = NULL;
	struct ip6_hdr *ip6 = NULL;
	register struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
	int idle, sendalot;
	int maxburst = TCP_MAXBURST;
	struct rmxp_tao *taop;
	struct rmxp_tao tao_noncached;
	struct rtentry *rt;
	int last_off = 0;
	int m_off;
	struct mbuf *m_last = 0;
	struct mbuf *m_head = 0;
	int isipv6 = tp->t_inpcb->inp_vflag & INP_IPV6;
	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->snd_max == tp->snd_una);
#ifdef __APPLE__
	if (idle && tp->t_rcvtime >= tp->t_rxtcur) {
#else
	if (idle && (ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
#endif
		/*
		 * We have been idle for "a while" and no acks are
		 * expected to clock out any data we send --
		 * slow start to get ack "clock" running again.
		 *
		 * Set the slow-start flight size depending on whether
		 * this is a local network or not.
		 */
		if ((isipv6 && in6_localaddr(&tp->t_inpcb->in6p_faddr)) ||
		    (!isipv6 && in_localaddr(tp->t_inpcb->inp_faddr)))
			tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local;
		else
			tp->snd_cwnd = tp->t_maxseg * ss_fltsz;
	}
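	/*
	 * Worked example of the restart window above (values assumed for
	 * illustration): with a typical Ethernet t_maxseg of 1460 bytes,
	 * ss_fltsz = 1 restarts a remote connection at snd_cwnd = 1460
	 * bytes (one segment), while ss_fltsz_local = 4 restarts a
	 * connection on a local subnet at snd_cwnd = 4 * 1460 = 5840
	 * bytes, so the first burst after an idle period is larger when
	 * the peer is nearby.
	 */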
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);

	if (isipv6) {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
		    (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
		     (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
		    0,0,0);
	}
	else {
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
		    (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
		     (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
		    0,0,0);
	}
	/*
	 * If the route generation id changed, we need to check that our
	 * local (source) IP address is still valid.  If it isn't, either
	 * return an error or silently do nothing (assuming the address will
	 * come back before the TCP connection times out).
	 */
	if (tp->t_inpcb->inp_route.ro_rt != NULL &&
	    (tp->t_inpcb->inp_route.ro_rt->generation_id != route_generation)) {
		/* check that the source address is still valid */
		if (ifa_foraddr(tp->t_inpcb->inp_laddr.s_addr) == NULL) {
			if (tp->t_state >= TCPS_CLOSE_WAIT) {
				tcp_drop(tp, EADDRNOTAVAIL);
				return(EADDRNOTAVAIL);
			}

			/*
			 * Set the retransmit timer if it wasn't set;
			 * reset the persist timer and shift register, as the
			 * advertised peer window may not be valid anymore.
			 */
			if (!tp->t_timer[TCPT_REXMT]) {
				tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
				if (tp->t_timer[TCPT_PERSIST]) {
					tp->t_timer[TCPT_PERSIST] = 0;
					tp->t_rxtshift = 0;
				}
			}

			if (so->so_flags & SOF_NOADDRAVAIL)
				return(EADDRNOTAVAIL);
			else
				return(0); /* silently ignore and keep data in socket */
		}
		else { /* Clear the cached route, will be reacquired later */
			rtfree(tp->t_inpcb->inp_route.ro_rt);
			tp->t_inpcb->inp_route.ro_rt = (struct rtentry *)0;
		}
	}
again:
	sendalot = 0;
	off = tp->snd_nxt - tp->snd_una;
	win = min(tp->snd_wnd, tp->snd_cwnd);
	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
		win = min(win, slowlink_wsize);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_force) {
		if (win == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.sb_cc)
				flags &= ~TH_FIN;
			win = 1;
		} else {
			tp->t_timer[TCPT_PERSIST] = 0;
			tp->t_rxtshift = 0;
		}
	}

	len = (long)ulmin(so->so_snd.sb_cc, win) - off;

	if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) {
		taop = &tao_noncached;
		bzero(taop, sizeof(*taop));
	}
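	/*
	 * Worked example of the length computation above (numbers assumed
	 * for illustration): with 10000 bytes queued in so_snd.sb_cc, 3000
	 * bytes already in flight (off = snd_nxt - snd_una = 3000), and an
	 * effective window of min(snd_wnd, snd_cwnd) = 8000, then
	 * len = min(10000, 8000) - 3000 = 5000 bytes are eligible to be
	 * sent in this pass (before the t_maxseg clamp below).
	 */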
	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT &&
		    taop->tao_ccsent == 0) {
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return 0;
		}
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments
	 * in cases when no CC option will be sent.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) &&
	    ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) ||
	     ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC)))) {
		len = 0;
		flags &= ~TH_FIN;
	}
	if (len < 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be -1.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 */
		len = 0;
		if (win == 0) {
			tp->t_timer[TCPT_REXMT] = 0;
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (tp->t_timer[TCPT_PERSIST] == 0)
				tcp_setpersist(tp);
		}
	}
	if (len > tp->t_maxseg) {
		len = tp->t_maxseg;
		sendalot = 1;
	}
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
		flags &= ~TH_FIN;

	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)	/* Clips window size for slow links */
		win = min(sbspace(&so->so_rcv), slowlink_wsize);
	else
		win = sbspace(&so->so_rcv);
	/*
	 * Sender silly window avoidance.  If the connection is idle
	 * and we can send all of the queued data, or a full-sized
	 * segment, or at least a default-sized segment, or we are being
	 * forced, then send; otherwise don't bother.
	 * If the peer's buffer is tiny, then send
	 * when the window is at least half open.
	 * If retransmitting (possibly after the persist timer forced us
	 * to send into a small window), then we must resend.
	 */
	if (len) {
		if (len == tp->t_maxseg)
			goto send;
		if (!(tp->t_flags & TF_MORETOCOME) &&
		    (idle || tp->t_flags & TF_NODELAY) &&
		    (tp->t_flags & TF_NOPUSH) == 0 &&
		    len + off >= so->so_snd.sb_cc)
			goto send;
		if (tp->t_force)
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))
			goto send;
	}
	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 50% of the maximum possible
	 * window, then want to send a window update to peer.
	 */
	if (win > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);

		if (adv >= (long) (2 * tp->t_maxseg))
			goto send;
		if (2 * adv >= (long) so->so_rcv.sb_hiwat)
			goto send;
	}
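	/*
	 * Worked example of the window-update test above (numbers assumed
	 * for illustration): with t_maxseg = 1460 and an 8192-byte receive
	 * buffer (so_rcv.sb_hiwat), a window update is sent once "adv"
	 * reaches either 2 * 1460 = 2920 bytes or half the buffer,
	 * 8192 / 2 = 4096 bytes, whichever test passes first; smaller
	 * openings are held back to avoid a stream of tiny updates.
	 */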
	/*
	 * Send if we owe peer an ACK.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, or we're retransmitting the FIN,
	 * then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tp->t_timer[TCPT_PERSIST]
	 *	is set when we are in persist state.
	 * tp->t_force
	 *	is set when we are called to send a persist packet.
	 * tp->t_timer[TCPT_REXMT]
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
	    tp->t_timer[TCPT_PERSIST] == 0) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	return (0);

send:
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
		hdrlen = sizeof (struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if ((tp->t_flags & TF_NOOPT) == 0) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			(void)memcpy(opt + 2, &mss, sizeof(mss));
			optlen = TCPOLEN_MAXSEG;
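			/*
			 * For reference, the four option bytes built above
			 * follow the standard TCP MSS option layout
			 * (RFC 793): kind = 2 (TCPOPT_MAXSEG), length = 4
			 * (TCPOLEN_MAXSEG), then the 16-bit MSS in network
			 * byte order.  With an MSS of 1460 the bytes are
			 * 02 04 05 b4.
			 */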
			if ((tp->t_flags & TF_REQ_SCALE) &&
			    ((flags & TH_ACK) == 0 ||
			     (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}
		}
	}
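	/*
	 * For reference, the 32-bit word written above packs a one-byte
	 * NOP pad (kind 1) followed by the window scale option (RFC 1323):
	 * kind = 3 (TCPOPT_WINDOW), length = 3 (TCPOLEN_WINDOW), and the
	 * requested shift count.  A shift of 2, for example, yields the
	 * bytes 01 03 03 02 and lets the peer interpret our advertised
	 * window as "value << 2".
	 */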
	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYN's.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (flags & TH_RST) == 0 &&
	    ((flags & TH_ACK) == 0 ||
	     (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(tcp_now);
		*lp = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}
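	/*
	 * For reference, TCPOPT_TSTAMP_HDR is the RFC 1323 appendix A
	 * prefix: two NOP pads (kind 1) followed by kind = 8
	 * (TCPOPT_TIMESTAMP) and length = 10, i.e. the bytes 01 01 08 0a.
	 * Together with the two 32-bit values stored above (our tcp_now as
	 * TSval and the peer's ts_recent echoed as TSecr) the option
	 * occupies TCPOLEN_TSTAMP_APPA = 12 bytes, keeping the option area
	 * 32-bit aligned.
	 */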
	/*
	 * Send `CC-family' options if our side wants to use them (TF_REQ_CC),
	 * options are allowed (!TF_NOOPT) and it's not a RST.
	 */
	if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
	    (flags & TH_RST) == 0) {
		switch (flags & (TH_SYN|TH_ACK)) {
		/*
		 * This is a normal ACK, send CC if we received CC before
		 * from our peer.
		 */
		case TH_ACK:
			if (!(tp->t_flags & TF_RCVD_CC))
				break;
			/*FALLTHROUGH*/

		/*
		 * We can only get here in T/TCP's SYN_SENT* state, when
		 * we're sending a non-SYN segment without waiting for
		 * the ACK of our SYN.  A check above assures that we only
		 * do this if our peer understands T/TCP.
		 */
		case 0:
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_CC;
			opt[optlen++] = TCPOLEN_CC;
			*(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
			optlen += 4;
			break;

		/*
		 * This is our initial SYN, check whether we have to use
		 * CC or CC.new.
		 */
		case TH_SYN:
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = TCPOPT_NOP;
			opt[optlen++] = tp->t_flags & TF_SENDCCNEW ?
						TCPOPT_CCNEW : TCPOPT_CC;
			opt[optlen++] = TCPOLEN_CC;
			*(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
			optlen += 4;
			break;

		/*
		 * This is a SYN,ACK; send CC and CC.echo if we received
		 * CC from our peer.
		 */
		case (TH_SYN|TH_ACK):
			if (tp->t_flags & TF_RCVD_CC) {
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_CC;
				opt[optlen++] = TCPOLEN_CC;
				*(u_int32_t *)&opt[optlen] =
					htonl(tp->cc_send);
				optlen += 4;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_NOP;
				opt[optlen++] = TCPOPT_CCECHO;
				opt[optlen++] = TCPOLEN_CC;
				*(u_int32_t *)&opt[optlen] =
					htonl(tp->cc_recv);
				optlen += 4;
			}
			break;
		}
	}

	hdrlen += optlen;
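	/*
	 * For reference, the T/TCP options built above follow RFC 1644:
	 * each CC-family option is kind 11 (CC), 12 (CC.NEW) or 13
	 * (CC.ECHO) with length TCPOLEN_CC = 6, carrying a 32-bit
	 * connection count; the two leading NOPs pad each option to 8
	 * bytes so the option list stays 32-bit aligned.
	 */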
	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else {
		if (tp->t_inpcb->inp_options) {
			ipoptlen = tp->t_inpcb->inp_options->m_len -
				offsetof(struct ipoption, ipopt_list);
		} else {
			ipoptlen = 0;
		}
	}
	if (ipsec_bypass == 0)
		ipoptlen += ipsec_hdrsiz_tcp(tp);

	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxopd length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		/*
		 * If there is still more to send, don't close the connection.
		 */
		flags &= ~TH_FIN;
		len = tp->t_maxopd - optlen - ipoptlen;
		sendalot = 1;
	}

/*#ifdef DIAGNOSTIC*/
#if INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
		panic("tcphdr too big");
#else
	if (max_linkhdr + hdrlen > MHLEN)
		panic("tcphdr too big");
#endif
/*#endif*/
	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		if (tp->t_force && len == 1)
			tcpstat.tcps_sndprobe++;
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
			tcpstat.tcps_sndrexmitpack++;
			tcpstat.tcps_sndrexmitbyte += len;
		} else {
			tcpstat.tcps_sndpack++;
			tcpstat.tcps_sndbyte += len;
		}
#ifdef notyet
		if ((m = m_copypack(so->so_snd.sb_mb, off,
		    (int)len, max_linkhdr + hdrlen)) == 0) {
			error = ENOBUFS;
			goto out;
		}
		/*
		 * m_copypack left space for our hdr; use it.
		 */
		m->m_len += hdrlen;
		m->m_data -= hdrlen;
#else
		/*
		 * Try to use the new interface that allocates all
		 * the necessary mbuf hdrs under one mbuf lock and
		 * avoids rescanning the socket mbuf list if
		 * certain conditions are met.  This routine can't
		 * be used in the following cases...
		 * 1) the protocol headers exceed the capacity of
		 *    a single mbuf header's data area (no cluster attached)
		 * 2) the length of the data being transmitted plus
		 *    the protocol headers fits into a single mbuf header's
		 *    data area (no cluster attached)
		 */
		m = NULL;
		if (MHLEN < hdrlen + max_linkhdr) {
			MGETHDR(m, M_DONTWAIT, MT_HEADER);
			if (m == NULL) {
				error = ENOBUFS;
				goto out;
			}
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				error = ENOBUFS;
				goto out;
			}
			m->m_data += max_linkhdr;
			m->m_len = hdrlen;
		}
		if (len <= MHLEN - hdrlen - max_linkhdr) {
			if (m == NULL) {
				MGETHDR(m, M_DONTWAIT, MT_HEADER);
				if (m == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
			/* makes sure we still have data left to be sent at this point */
			if (so->so_snd.sb_mb == NULL || off == -1) {
				if (m != NULL) m_freem(m);
				error = 0; /* should we return an error? */
				goto out;
			}
			m_copydata(so->so_snd.sb_mb, off, (int) len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			if (m != NULL) {
				m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
				if (m->m_next == 0) {
					(void) m_free(m);
					error = ENOBUFS;
					goto out;
				}
			} else {
				/*
				 * Determine whether the mbuf pointer and offset passed back by the 'last' call
				 * to m_copym_with_hdrs are still valid... if the head of the socket chain has
				 * changed (due to an incoming ACK for instance), or the offset into the chain we
				 * just computed is different from the one last returned by m_copym_with_hdrs (perhaps
				 * we're re-transmitting a packet sent earlier), then we can't pass the mbuf pointer and
				 * offset into it as valid hints for m_copym_with_hdrs to use (if valid, these hints allow
				 * m_copym_with_hdrs to avoid rescanning from the beginning of the socket buffer mbuf list).
				 * Setting the mbuf pointer to NULL is sufficient to disable the hint mechanism.
				 */
				if (m_head != so->so_snd.sb_mb || last_off != off)
					m_last = NULL;
				last_off = off + len;
				m_head = so->so_snd.sb_mb;

				/* makes sure we still have data left to be sent at this point */
				if (m_head == NULL) {
					error = 0; /* should we return an error? */
					goto out;
				}

				/*
				 * m_copym_with_hdrs will always return the last mbuf pointer and the offset into it that
				 * it acted on to fulfill the current request, whether a valid 'hint' was passed in or not.
				 */
				if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, (int) len,
				    M_DONTWAIT, &m_last, &m_off)) == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_data += max_linkhdr;
				m->m_len = hdrlen;
			}
		}
		m->m_pkthdr.len = hdrlen + len;
#endif
		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.sb_cc)
			flags |= TH_PUSH;
	} else {
		if (tp->t_flags & TF_ACKNOW)
			tcpstat.tcps_sndacks++;
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			tcpstat.tcps_sndctrl++;
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			tcpstat.tcps_sndurg++;
		else
			tcpstat.tcps_sndwinup++;

		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			MH_ALIGN(m, hdrlen);
		} else
			m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	m->m_pkthdr.rcvif = (struct ifnet *)0;
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcp_fillheaders(tp, ip6, th);
	} else {
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(ip + 1);
		/* this picks up the pseudo header (w/o the length) */
		tcp_fillheaders(tp, ip, th);
	}
	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST])
		th->th_seq = htonl(tp->snd_nxt);
	else
		th->th_seq = htonl(tp->snd_max);
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
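	/*
	 * For reference, th_off is the TCP header length in 32-bit words:
	 * the base header is sizeof (struct tcphdr) = 20 bytes, so with,
	 * say, 12 bytes of options the computation above yields
	 * (20 + 12) >> 2 = 8; without options the header keeps its
	 * default length of 5 words.
	 */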
	th->th_flags = flags;
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 */
	if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
		win = 0;
	if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
		win = (long)(tp->rcv_adv - tp->rcv_nxt);
	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
		if (win > (long)slowlink_wsize)
			win = slowlink_wsize;
		th->th_win = htons((u_short) (win >> tp->rcv_scale));
	} else {
		if (win > (long)TCP_MAXWIN << tp->rcv_scale)
			win = (long)TCP_MAXWIN << tp->rcv_scale;
		th->th_win = htons((u_short) (win >> tp->rcv_scale));
	}
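	/*
	 * For reference, the advertised window is sent right-shifted by
	 * rcv_scale (RFC 1323 window scaling).  With rcv_scale = 2, for
	 * example, the largest advertisable window is
	 * TCP_MAXWIN << 2 = 65535 * 4 = 262140 bytes, and a computed win
	 * of 100000 goes on the wire as 100000 >> 2 = 25000.
	 */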
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;		/* drag it along */
	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
	if (isipv6)
		/*
		 * ip6_plen need not be filled in now; it will be filled
		 * in by ip6_output().
		 */
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
				       sizeof(struct tcphdr) + optlen + len);
	else {
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		if (len + optlen)
			th->th_sum = in_addword(th->th_sum,
				htons((u_short)(optlen + len)));

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
			("%s: IP version incorrect: %d", __FUNCTION__, ip->ip_v));
	}
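	/*
	 * Note on the IPv4 path above: tcp_fillheaders() has already seeded
	 * th_sum with the pseudo-header sum without the length (see the
	 * "picks up the pseudo header" comment earlier), so in_addword()
	 * only folds in the TCP length (options plus data) here; the
	 * CSUM_TCP / csum_data hints let the checksum over the payload be
	 * completed later, in hardware or by the IP layer.
	 */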
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtseq = startseq;
				tcpstat.tcps_segstimed++;
			}
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing an ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
		if (tp->t_timer[TCPT_REXMT] == 0 &&
		    tp->snd_nxt != tp->snd_una) {
			tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
			if (tp->t_timer[TCPT_PERSIST]) {
				tp->t_timer[TCPT_PERSIST] = 0;
				tp->t_rxtshift = 0;
			}
		}
	} else
		if (SEQ_GT(tp->snd_nxt + len, tp->snd_max))
			tp->snd_max = tp->snd_nxt + len;
	/*
	 * Trace.
	 */
	if (so->so_options & SO_DEBUG)
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
	if (isipv6) {
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb,
					       tp->t_inpcb->in6p_route.ro_rt ?
					       tp->t_inpcb->in6p_route.ro_rt->rt_ifp
					       : NULL);

		/* TODO: IPv6 IP6TOS_ECT bit on */
		if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
			m_freem(m);
			error = ENOBUFS;
			goto out;
		}
		error = ip6_output(m,
				   tp->t_inpcb->in6p_outputopts,
				   &tp->t_inpcb->in6p_route,
				   (so->so_options & SO_DONTROUTE), NULL, NULL);
	} else {
		ip->ip_len = m->m_pkthdr.len;
		if (isipv6)
			ip->ip_ttl = in6_selecthlim(tp->t_inpcb,
						    tp->t_inpcb->in6p_route.ro_rt ?
						    tp->t_inpcb->in6p_route.ro_rt->rt_ifp
						    : NULL);
		else
			ip->ip_ttl = tp->t_inpcb->inp_ip_ttl;	/* XXX */
		ip->ip_tos = tp->t_inpcb->inp_ip_tos;	/* XXX */
		if (isipv6) {
			KERNEL_DEBUG(DBG_LAYER_BEG,
			    ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
			    (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
			     (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
			    0,0,0);
		}
		else {
			KERNEL_DEBUG(DBG_LAYER_BEG,
			    ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
			    (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
			     (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
			    0,0,0);
		}

		/*
		 * See if we should do MTU discovery.  We do it only if the
		 * following are true:
		 *	1) we have a valid route to the destination
		 *	2) the MTU is not locked (if it is, then discovery
		 *	   has been disabled for that route)
		 */
		if (path_mtu_discovery
		    && (rt = tp->t_inpcb->inp_route.ro_rt)
		    && rt->rt_flags & RTF_UP
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
			ip->ip_off |= IP_DF;
		}

		if (ipsec_bypass == 0)
			ipsec_setsocket(m, so);

		error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
		    (so->so_options & SO_DONTROUTE), 0);
	}
	if (error) {
		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 */
		if (tp->t_force == 0 || !tp->t_timer[TCPT_PERSIST]) {
			/*
			 * No need to check for TH_FIN here because
			 * the TF_SENTFIN flag handles that case.
			 */
			if ((flags & TH_SYN) == 0)
				tp->snd_nxt -= len;
		}
out:
		if (error == ENOBUFS) {
			if (!tp->t_timer[TCPT_REXMT] &&
			    !tp->t_timer[TCPT_PERSIST])
				tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
			tcp_quench(tp->t_inpcb, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return (0);
		}
		if (error == EMSGSIZE) {
			/*
			 * ip_output() will have already fixed the route
			 * for us.  tcp_mtudisc() will, as its last action,
			 * initiate retransmission, so it is important to
			 * not do so here.
			 */
			tcp_mtudisc(tp->t_inpcb, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return (0);
		}
		if ((error == EHOSTUNREACH || error == ENETDOWN)
		    && TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
			return (0);
		}
		KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
		return (error);
	}
	tcpstat.tcps_sndtotal++;

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (win > 0 && SEQ_GT(tp->rcv_nxt + win, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + win;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);

	KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
	if (sendalot && (!tcp_do_newreno || --maxburst))
		goto again;
	return (0);
}
void
tcp_setpersist(tp)
	register struct tcpcb *tp;
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;

	if (tp->t_timer[TCPT_REXMT])
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
	TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
	    t * tcp_backoff[tp->t_rxtshift],
	    TCPTV_PERSMIN, TCPTV_PERSMAX);
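	/*
	 * For reference, with t_srtt scaled by 8 and t_rttvar scaled by 4,
	 * the expression for t above works out to roughly
	 * srtt + 2 * rttvar in slow-timer ticks (the same estimate the
	 * retransmit timer comment describes).  tcp_backoff[] then grows
	 * the interval exponentially with each unanswered probe, and
	 * TCPT_RANGESET clamps the product to [TCPTV_PERSMIN, TCPTV_PERSMAX],
	 * so persist probes start near the smoothed RTT and back off to a
	 * bounded maximum.
	 */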
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}