/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */
#include "opt_compat.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_debug.h>

#include <sys/kdebug.h>
#define DBG_FNC_TCP_FAST	NETDBG_CODE(DBG_NETTCP, (5 << 8))
#define DBG_FNC_TCP_SLOW	NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)
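
/*
 * kdebug trace codes: KERNEL_DEBUG() is invoked with DBG_FUNC_START/END
 * around the fast and slow timer passes below, so the time spent in each
 * pass shows up in kernel traces.
 */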

int	tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
	CTLFLAG_RW, &tcp_keepinit, 0, "");

int	tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
	CTLFLAG_RW, &tcp_keepidle, 0, "");

static int	tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
	CTLFLAG_RW, &tcp_keepintvl, 0, "");

static int	always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive,
	CTLFLAG_RW, &always_keepalive, 0, "");

static int	tcp_keepcnt = TCPTV_KEEPCNT;		/* max idle probes */
static int	tcp_maxpersistidle = TCPTV_KEEP_IDLE;	/* max idle time in persist */
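
/*
 * Note: the keep* values above are counts of slow-timeout ticks;
 * tcp_slowtimo() below runs every 500 ms and decrements the per-connection
 * timers that are compared against them.
 */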

struct inpcbhead	time_wait_slots[N_TIME_WAIT_SLOTS];
int	cur_tw_slot = 0;

u_long	*delack_bitmask;
u_long	current_active_connections = 0;
u_long	last_active_conn_count = 0;
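
/*
 * Move a connection onto the TIME_WAIT wheel: the PCB is taken off the
 * main list and hung on one of the N_TIME_WAIT_SLOTS buckets, chosen from
 * its remaining 2MSL ticks relative to the slot currently being drained
 * (cur_tw_slot), so tcp_slowtimo() only has to visit one slot per tick.
 */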
void
add_to_time_wait(tp)
	struct tcpcb *tp;
{
	int tw_slot;

	LIST_REMOVE(tp->t_inpcb, inp_list);

	if (tp->t_timer[TCPT_2MSL] == 0)
		tp->t_timer[TCPT_2MSL] = 1;

	tp->t_idle += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
	tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
	if (tw_slot >= N_TIME_WAIT_SLOTS)
		tw_slot -= N_TIME_WAIT_SLOTS;

	LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
}
/*
 * Fast timeout routine for processing delayed acks
 */
void
tcp_fasttimo()
{
	register struct inpcb *inp;
	register struct tcpcb *tp;

	register u_long i, j;
	register u_long temp_mask;
	register u_long elem_base = 0;
	struct inpcbhead *head;
	int delack_checked = 0;

	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0, 0, 0, 0, 0);

	if (!tcp_delack_enabled)
		return;

	if ((current_active_connections > DELACK_BITMASK_THRESH) &&
	    (last_active_conn_count > DELACK_BITMASK_THRESH)) {
		/*
		 * Scan the delayed-ack bitmask: each set bit marks a PCB
		 * hash bucket that may hold a connection with TF_DELACK
		 * pending.
		 */
		for (i = 0; i < (tcbinfo.hashsize / 32); i++) {
			if (delack_bitmask[i]) {
				temp_mask = 1;
				for (j = 0; j < 32; j++) {
					if (temp_mask & delack_bitmask[i]) {
						head = &tcbinfo.hashbase[elem_base + j];
						for (inp = head->lh_first; inp != 0;
						    inp = inp->inp_hash.le_next) {
							delack_checked++;
							if ((tp = (struct tcpcb *)inp->inp_ppcb) &&
							    (tp->t_flags & TF_DELACK)) {
								tp->t_flags &= ~TF_DELACK;
								tp->t_flags |= TF_ACKNOW;
								tcpstat.tcps_delack++;
								(void) tcp_output(tp);
							}
						}
					}
					temp_mask <<= 1;
				}
				delack_bitmask[i] = 0;
			}
			elem_base += 32;
		}
	} else {
		/* Few connections: just walk the whole PCB list. */
		for (inp = tcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) {
			if ((tp = (struct tcpcb *)inp->inp_ppcb) &&
			    (tp->t_flags & TF_DELACK)) {
				tp->t_flags &= ~TF_DELACK;
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_delack++;
				(void) tcp_output(tp);
			}
		}
	}

	last_active_conn_count = current_active_connections;
	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END,
	    delack_checked, tcpstat.tcps_delack, 0, 0, 0);
}
/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates the timers in all active tcb's and
 * causes finite state machine actions if timers expire.
 */
void
tcp_slowtimo()
{
	register struct inpcb *ip, *ipnxt;
	register struct tcpcb *tp;
	register int i;
	int ostate;
	static int tws_checked;

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

	/*
	 * Search through tcb's and update active timers.
	 */
	ip = tcb.lh_first;
	for (; ip != NULL; ip = ipnxt) {
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		if (tp == 0 || tp->t_state == TCPS_LISTEN)
			continue;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
				ostate = tp->t_state;
				tp = tcp_timers(tp, i);
				if (tp == NULL)
					break;
				if (tp->t_inpcb->inp_socket->so_options
				    & SO_DEBUG)
					tcp_trace(TA_USER, ostate, tp,
					    (void *)0, (struct tcphdr *)0,
					    PRU_SLOWTIMO);
			}
		}
	}

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked, 0, 0, 0, 0);

	/*
	 * Process the items in the current time-wait slot
	 */
	for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt) {
		tws_checked++;
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		if (tp == NULL)
			continue;

		if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
			tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
			tp->t_idle += N_TIME_WAIT_SLOTS;
		} else
			tp->t_timer[TCPT_2MSL] = 0;

		if (tp->t_timer[TCPT_2MSL] == 0)
			tp = tcp_timers(tp, TCPT_2MSL);
	}

	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
		cur_tw_slot = 0;

	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;	/* increment iss */
	if ((int)tcp_iss < 0)
		tcp_iss = TCP_ISSINCR;		/* XXX */
	tcp_now++;				/* for timestamps */

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
	    tws_checked, cur_tw_slot, 0, 0, 0);
}
/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
	struct tcpcb *tp;
{
	register int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		tp->t_timer[i] = 0;
}
int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(tp, timer)
	register struct tcpcb *tp;
	int timer;
{
	register int rexmt;
	struct socket *so_tmp;
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;

	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
	 * control block.  Otherwise, check again in a bit.
	 */
	case TCPT_2MSL:
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_idle <= tcp_maxidle) {
			tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
			add_to_time_wait(tp);
		} else
			tp = tcp_close(tp);
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			tcpstat.tcps_timeoutdrop++;
			so_tmp = tp->t_inpcb->inp_socket;
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);
			postevent(so_tmp, 0, EV_TIMEOUT);
			break;
		}
		tcpstat.tcps_rexmttimeo++;
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		TCPT_RANGESET(tp->t_rxtcur, rexmt,
		    tp->t_rttmin, TCPTV_REXMTMAX);
		tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
		/*
		 * If losing, let the lower level know and try for
		 * a better route.  Also, if we backed off this far,
		 * our srtt estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
			if (isipv6)
				in6_losing(tp->t_inpcb);
			else
				in_losing(tp->t_inpcb);
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;
		/*
		 * If timing a segment in this window, stop the timer.
		 */
		tp->t_rtt = 0;
		/*
		 * Close the congestion window down to one segment
		 * (we'll open it by one segment for each ack we get).
		 * Since we probably have a window's worth of unacked
		 * data accumulated, this "slow start" keeps us from
		 * dumping all that data as back-to-back packets (which
		 * might overwhelm an intermediate gateway).
		 *
		 * There are two phases to the opening: Initially we
		 * open by one mss on each ack.  This makes the window
		 * size increase exponentially with time.  If the
		 * window is larger than the path can handle, this
		 * exponential growth results in dropped packet(s)
		 * almost immediately.  To get more time between
		 * drops but still "push" the network to take advantage
		 * of improving conditions, we switch from exponential
		 * to linear window opening at some threshold size.
		 * For a threshold, we use half the current window
		 * size, truncated to a multiple of the mss.
		 *
		 * (the minimum cwnd that will give us exponential
		 * growth is 2 mss.  We don't allow the threshold
		 * to go below this.)
		 */
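		/*
		 * For example, with t_maxseg = 512 and 8192 bytes of the
		 * window in flight, win = 8192 / 2 / 512 = 8, so ssthresh
		 * becomes 8 * 512 = 4096 bytes while cwnd restarts at a
		 * single 512-byte segment.
		 */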
		{
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		}
		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed.  After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 */
		if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (tp->t_idle >= tcp_maxpersistidle ||
		     tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
			tcpstat.tcps_persistdrop++;
			so_tmp = tp->t_inpcb->inp_socket;
			tp = tcp_drop(tp, ETIMEDOUT);
			postevent(so_tmp, 0, EV_TIMEOUT);
			break;
		}
		tcp_setpersist(tp);
		tp->t_force = 1;
		(void) tcp_output(tp);
		tp->t_force = 0;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		tcpstat.tcps_keeptimeo++;
		if (tp->t_state < TCPS_ESTABLISHED)
			goto dropit;
		if ((always_keepalive ||
		    tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
		    tp->t_state <= TCPS_CLOSING) {
			if (tp->t_idle >= tcp_keepidle + tcp_maxidle)
				goto dropit;
			/*
			 * Send a packet designed to force a response
			 * if the peer is up and reachable:
			 * either an ACK if the connection is still alive,
			 * or an RST if the peer has closed the connection
			 * due to timeout or reboot.
			 * Using sequence number tp->snd_una-1
			 * causes the transmitted zero-length segment
			 * to lie outside the receive window;
			 * by the protocol spec, this requires the
			 * correspondent TCP to respond.
			 */
			tcpstat.tcps_keepprobe++;
#ifdef TCP_COMPAT_42
			/*
			 * The keepalive packet must have nonzero length
			 * to get a 4.2 host to respond.
			 */
			if (isipv6)
				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
				    &tp->t_template->tt_t,
				    (struct mbuf *)NULL,
				    tp->rcv_nxt - 1, tp->snd_una - 1, 0,
				    isipv6);
			else
				tcp_respond(tp, (void *)&tp->t_template->tt_i,
				    &tp->t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt - 1, tp->snd_una - 1, 0,
				    isipv6);
#else
			if (isipv6)
				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
				    &tp->t_template->tt_t,
				    (struct mbuf *)NULL, tp->rcv_nxt,
				    tp->snd_una - 1, 0, isipv6);
			else
				tcp_respond(tp, (void *)&tp->t_template->tt_i,
				    &tp->t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0, isipv6);
#endif
			tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
		} else
			tp->t_timer[TCPT_KEEP] = tcp_keepidle;
		break;

	dropit:
		tcpstat.tcps_keepdrops++;
		so_tmp = tp->t_inpcb->inp_socket;
		tp = tcp_drop(tp, ETIMEDOUT);
		postevent(so_tmp, 0, EV_TIMEOUT);
		break;
	}
	return (tp);
}