bsd/netinet/tcp_timer.c (apple/xnu, release xnu-344.21.74)
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
27 * The Regents of the University of California. All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 * 1. Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * 2. Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * 3. All advertising materials mentioning features or use of this software
38 * must display the following acknowledgement:
39 * This product includes software developed by the University of
40 * California, Berkeley and its contributors.
41 * 4. Neither the name of the University nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 *
57 * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
58 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
59 */
60
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/mbuf.h>
66 #include <sys/sysctl.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/protosw.h>
70
71 #include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
72
73 #include <net/route.h>
74
75 #include <netinet/in.h>
76 #include <netinet/in_systm.h>
77 #include <netinet/in_pcb.h>
78 #if INET6
79 #include <netinet6/in6_pcb.h>
80 #endif
81 #include <netinet/ip_var.h>
82 #include <netinet/tcp.h>
83 #include <netinet/tcp_fsm.h>
84 #include <netinet/tcp_seq.h>
85 #include <netinet/tcp_timer.h>
86 #include <netinet/tcp_var.h>
87 #include <netinet/tcpip.h>
88 #if TCPDEBUG
89 #include <netinet/tcp_debug.h>
90 #endif
91 #include <sys/kdebug.h>
92
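/*
 * kdebug trace codes; KERNEL_DEBUG() brackets each pass of the fast and
 * slow timer routines below with DBG_FUNC_START/END events under these
 * identifiers.
 */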
93 #define DBG_FNC_TCP_FAST NETDBG_CODE(DBG_NETTCP, (5 << 8))
94 #define DBG_FNC_TCP_SLOW NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)
95
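/*
 * sysctl handler converting between the millisecond values shown to
 * userland and the hz-based tick counts stored in the kernel: the stored
 * ticks are scaled up to milliseconds on reads, and new values are scaled
 * back to ticks on writes, with anything that would round down to zero
 * ticks rejected as EINVAL.
 */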
96 static int
97 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
98 {
99 int error, s, tt;
100
101 tt = *(int *)oidp->oid_arg1;
102 s = tt * 1000 / hz;
103
104 error = sysctl_handle_int(oidp, &s, 0, req);
105 if (error || !req->newptr)
106 return (error);
107
108 tt = s * hz / 1000;
109 if (tt < 1)
110 return (EINVAL);
111
112 *(int *)oidp->oid_arg1 = tt;
113 return (0);
114 }
115
116 int tcp_keepinit;
117 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
118 &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
119
120 int tcp_keepidle;
121 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
122 &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
123
124 int tcp_keepintvl;
125 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
126 &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
127
128 int tcp_delacktime;
129 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
130 CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
131 "Time before a delayed ACK is sent");
132
133 int tcp_msl;
134 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
135 &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
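/*
 * The interval sysctls above all run through sysctl_msec_to_ticks, so
 * userland reads and writes milliseconds while the kernel keeps ticks;
 * for illustration, "sysctl -w net.inet.tcp.keepintvl=75000" would request
 * 75-second keepalive probe spacing.
 */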
136
137 static int always_keepalive = 0;
138 SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
139 &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
140
141 static int tcp_keepcnt = TCPTV_KEEPCNT;
142 /* max idle probes */
143 int tcp_maxpersistidle;
144 /* max idle time in persist */
145 int tcp_maxidle;
146
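/*
 * TIME_WAIT connections are parked on a small timer wheel: each slow-timer
 * tick services the list in time_wait_slots[cur_tw_slot] and then advances
 * cur_tw_slot.  delack_bitmask holds one bit per PCB hash bucket, set on
 * the receive side when a connection in that bucket owes a delayed ACK, so
 * tcp_fasttimo only needs to visit the flagged buckets.
 */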
147 struct inpcbhead time_wait_slots[N_TIME_WAIT_SLOTS];
148 int cur_tw_slot = 0;
149
150 u_long *delack_bitmask;
151
152
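/*
 * Park a TIME_WAIT tcpcb on the timer wheel.  The slot is derived from the
 * low bits of the remaining 2MSL count, offset from cur_tw_slot, so the
 * entry comes due roughly t_timer[TCPT_2MSL] slow-timer ticks from now.
 */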
153 void add_to_time_wait(tp)
154 struct tcpcb *tp;
155 {
156 int tw_slot;
157
158 LIST_REMOVE(tp->t_inpcb, inp_list);
159
160 if (tp->t_timer[TCPT_2MSL] == 0)
161 tp->t_timer[TCPT_2MSL] = 1;
162
163 tp->t_rcvtime += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
164 tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
165 if (tw_slot >= N_TIME_WAIT_SLOTS)
166 tw_slot -= N_TIME_WAIT_SLOTS;
167
168 LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
169 }
170
171
172
173
174
175 /*
176 * Fast timeout routine for processing delayed acks
177 */
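/*
 * The bitmask below is scanned one 32-bit word at a time; every set bit
 * selects a PCB hash bucket whose chain is walked, and any connection
 * found with TF_DELACK pending has the flag promoted to TF_ACKNOW so the
 * ACK goes out immediately.  Each word is cleared once it has been
 * processed.
 */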
178 void
179 tcp_fasttimo()
180 {
181 register struct inpcb *inp;
182 register struct tcpcb *tp;
183
184
185 register u_long i,j;
186 register u_long temp_mask;
187 register u_long elem_base = 0;
188 struct inpcbhead *head;
189 int s = splnet();
190
191 static
192 int delack_checked = 0;
193
194 KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0);
195
196 	if (!tcp_delack_enabled)
197 		{ splx(s); return; }
198
199 for (i=0; i < (tcbinfo.hashsize / 32); i++) {
200 if (delack_bitmask[i]) {
201 temp_mask = 1;
202 for (j=0; j < 32; j++) {
203 if (temp_mask & delack_bitmask[i]) {
204 head = &tcbinfo.hashbase[elem_base + j];
205 for (inp=head->lh_first; inp != 0; inp = inp->inp_hash.le_next) {
206 delack_checked++;
207 if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
208 tp->t_flags &= ~TF_DELACK;
209 tp->t_flags |= TF_ACKNOW;
210 tcpstat.tcps_delack++;
211 (void) tcp_output(tp);
212 }
213 }
214 }
215 temp_mask <<= 1;
216 }
217 delack_bitmask[i] = 0;
218 }
219 elem_base += 32;
220 }
221 KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked,tcpstat.tcps_delack,0,0,0);
222 splx(s);
223
224 }
225
226 /*
227 * TCP protocol timeout routine, called every 500 ms.
228 * Updates the timers in all active tcb's and
229 * causes finite state machine actions if timers expire.
230 */
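/*
 * Each pass also ages the per-connection counters (t_rcvtime, t_starttime,
 * t_rtttime), services the current TIME_WAIT wheel slot, and advances
 * tcp_now, the clock used for RFC 1323 timestamps.
 */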
231 void
232 tcp_slowtimo()
233 {
234 register struct inpcb *ip, *ipnxt;
235 register struct tcpcb *tp;
236 register int i;
237 int s;
238 #if TCPDEBUG
239 int ostate;
240 #endif
241 #if KDEBUG
242 static int tws_checked;
243 #endif
244
245 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
246 s = splnet();
247
248 tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
249
250 ip = tcb.lh_first;
251 if (ip == NULL) {
252 splx(s);
253 return;
254 }
255 /*
256 * Search through tcb's and update active timers.
257 */
258 for (; ip != NULL; ip = ipnxt) {
259 ipnxt = ip->inp_list.le_next;
260 tp = intotcpcb(ip);
261 if (tp == 0 || tp->t_state == TCPS_LISTEN)
262 continue;
263 /*
264 * Bogus state when port owned by SharedIP with loopback as the
265 * only configured interface: BlueBox does not filter loopback
266 */
267 if (tp->t_state == TCP_NSTATES)
268 continue;
269
270 for (i = 0; i < TCPT_NTIMERS; i++) {
271 if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
272 #if TCPDEBUG
273 ostate = tp->t_state;
274 #endif
275 tp = tcp_timers(tp, i);
276 if (tp == NULL)
277 goto tpgone;
278 #if TCPDEBUG
279 if (tp->t_inpcb->inp_socket->so_options
280 & SO_DEBUG)
281 tcp_trace(TA_USER, ostate, tp,
282 (void *)0,
283 (struct tcphdr *)0,
284 PRU_SLOWTIMO);
285 #endif
286 }
287 }
288 tp->t_rcvtime++;
289 tp->t_starttime++;
290 if (tp->t_rtttime)
291 tp->t_rtttime++;
292 tpgone:
293 ;
294 }
295
296 #if KDEBUG
297 tws_checked = 0;
298 #endif
299 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked,0,0,0,0);
300
301 /*
302 * Process the items in the current time-wait slot
303 */
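	/*
	 * An entry whose 2MSL count still exceeds the wheel size just has a
	 * full revolution subtracted and stays in this slot; only when the
	 * residue reaches zero is it handed to tcp_timers() for the real
	 * 2MSL expiry.
	 */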
304
305 for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt)
306 {
307 #if KDEBUG
308 tws_checked++;
309 #endif
310 ipnxt = ip->inp_list.le_next;
311 tp = intotcpcb(ip);
312 if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
313 tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
314 tp->t_rcvtime += N_TIME_WAIT_SLOTS;
315 }
316 else
317 tp->t_timer[TCPT_2MSL] = 0;
318
319 if (tp->t_timer[TCPT_2MSL] == 0)
320 tp = tcp_timers(tp, TCPT_2MSL);
321 }
322
323 if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
324 cur_tw_slot = 0;
325 tcp_now++; /* for timestamps */
326 splx(s);
327 KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
328 }
329
330 /*
331 * Cancel all timers for TCP tp.
332 */
333 void
334 tcp_canceltimers(tp)
335 struct tcpcb *tp;
336 {
337 register int i;
338
339 for (i = 0; i < TCPT_NTIMERS; i++)
340 tp->t_timer[i] = 0;
341 }
342
343 int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
344 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
345
346 int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
347 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
348
349 static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */
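/*
 * The retransmit timeout for backoff shift n is TCP_REXMTVAL(tp) scaled by
 * tcp_backoff[n] (tcp_syn_backoff[n] while still in SYN_SENT) and clamped
 * to [t_rttmin, TCPTV_REXMTMAX].  As an illustration, with an RTO base of
 * 3 slow-timer ticks (1.5 s) the fourth timeout waits 3 * 16 = 48 ticks,
 * i.e. 24 seconds.  tcp_totbackoff is 1+2+4+8+16+32 + 7*64 = 511.
 */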
350
351 /*
352 * TCP timer processing.
353 */
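/*
 * Runs the expiry action for one timer on tp and returns the tcpcb, or
 * NULL if the action dropped or closed the connection and the tcpcb has
 * been freed; the caller must not touch tp after a NULL return.
 */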
354 struct tcpcb *
355 tcp_timers(tp, timer)
356 register struct tcpcb *tp;
357 int timer;
358 {
359 register int rexmt;
360 struct socket *so_tmp;
361 struct tcptemp *t_template;
#if TCPDEBUG
	int ostate;	/* referenced by the SO_DEBUG trace in the keepalive case */
#endif
362
363 #if INET6
364 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
365 #endif /* INET6 */
366
367
368 switch (timer) {
369
370 /*
371 * 2 MSL timeout in shutdown went off. If we're closed but
372 * still waiting for peer to close and connection has been idle
373 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
374 * control block. Otherwise, check again in a bit.
375 */
376 case TCPT_2MSL:
377 if (tp->t_state != TCPS_TIME_WAIT &&
378 tp->t_rcvtime <= tcp_maxidle) {
379 tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
380 add_to_time_wait(tp);
381 }
382 else
383 tp = tcp_close(tp);
384 break;
385
386 /*
387 * Retransmission timer went off. Message has not
388 * been acked within retransmit interval. Back off
389 * to a longer retransmit interval and retransmit one segment.
390 */
391 case TCPT_REXMT:
392 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
393 tp->t_rxtshift = TCP_MAXRXTSHIFT;
394 tcpstat.tcps_timeoutdrop++;
395 so_tmp = tp->t_inpcb->inp_socket;
396 tp = tcp_drop(tp, tp->t_softerror ?
397 tp->t_softerror : ETIMEDOUT);
398 postevent(so_tmp, 0, EV_TIMEOUT);
399 break;
400 }
401
402 if (tp->t_rxtshift == 1) {
403 /*
404 * first retransmit; record ssthresh and cwnd so they can
405 * be recovered if this turns out to be a "bad" retransmit.
406 * A retransmit is considered "bad" if an ACK for this
407 * segment is received within RTT/2 interval; the assumption
408 * here is that the ACK was already in flight. See
409 * "On Estimating End-to-End Network Path Properties" by
410 * Allman and Paxson for more details.
411 */
412 tp->snd_cwnd_prev = tp->snd_cwnd;
413 tp->snd_ssthresh_prev = tp->snd_ssthresh;
414 tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
415 }
416 tcpstat.tcps_rexmttimeo++;
417 if (tp->t_state == TCPS_SYN_SENT)
418 rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
419 else
420 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
421 TCPT_RANGESET(tp->t_rxtcur, rexmt,
422 tp->t_rttmin, TCPTV_REXMTMAX);
423 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
424
425 /*
426 * Disable rfc1323 and rfc1644 if we haven't received any response to
427 * our third SYN, to work around some broken terminal servers
428 * (most of which have hopefully been retired) that have bad VJ
429 * header compression code which trashes TCP segments containing
430 * unknown-to-them TCP options.
431 */
432 if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
433 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
434 /*
435 * If losing, let the lower level know and try for
436 * a better route. Also, if we backed off this far,
437 * our srtt estimate is probably bogus. Clobber it
438 * so we'll take the next rtt measurement as our srtt;
439 * move the current srtt into rttvar to keep the current
440 * retransmit times until then.
441 */
442 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
443 #if INET6
444 if (isipv6)
445 in6_losing(tp->t_inpcb);
446 else
447 #endif /* INET6 */
448 in_losing(tp->t_inpcb);
449 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
450 tp->t_srtt = 0;
451 }
452 tp->snd_nxt = tp->snd_una;
453 /*
454 * Note: We overload snd_recover to function also as the
455 * snd_last variable described in RFC 2582
456 */
457 tp->snd_recover = tp->snd_max;
458 /*
459 * Force a segment to be sent.
460 */
461 tp->t_flags |= TF_ACKNOW;
462 /*
463 * If timing a segment in this window, stop the timer.
464 */
465 tp->t_rtttime = 0;
466 /*
467 * Close the congestion window down to one segment
468 * (we'll open it by one segment for each ack we get).
469 * Since we probably have a window's worth of unacked
470 * data accumulated, this "slow start" keeps us from
471 * dumping all that data as back-to-back packets (which
472 * might overwhelm an intermediate gateway).
473 *
474 * There are two phases to the opening: Initially we
475 * open by one mss on each ack. This makes the window
476 * size increase exponentially with time. If the
477 * window is larger than the path can handle, this
478 * exponential growth results in dropped packet(s)
479 * almost immediately. To get more time between
480 * drops but still "push" the network to take advantage
481 * of improving conditions, we switch from exponential
482 * to linear window opening at some threshold size.
483 * For a threshold, we use half the current window
484 * size, truncated to a multiple of the mss.
485 *
486 * (the minimum cwnd that will give us exponential
487 * growth is 2 mss. We don't allow the threshold
488 * to go below this.)
489 */
490 {
491 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
492 if (win < 2)
493 win = 2;
494 tp->snd_cwnd = tp->t_maxseg;
495 tp->snd_ssthresh = win * tp->t_maxseg;
496 tp->t_dupacks = 0;
497 }
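		/*
		 * For instance, if snd_wnd and snd_cwnd both sat at 16 segments
		 * when the timer fired, win comes out to 8, so ssthresh becomes
		 * 8 * t_maxseg while cwnd restarts at a single segment.
		 */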
498 (void) tcp_output(tp);
499 break;
500
501 /*
502 * Persistence timer into zero window.
503 * Force a byte to be output, if possible.
504 */
505 case TCPT_PERSIST:
506 tcpstat.tcps_persisttimeo++;
507 /*
508 * Hack: if the peer is dead/unreachable, we do not
509 * time out if the window is closed. After a full
510 * backoff, drop the connection if the idle time
511 * (no responses to probes) reaches the maximum
512 * backoff that we would use if retransmitting.
513 */
514 if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
515 (tp->t_rcvtime >= tcp_maxpersistidle ||
516 tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
517 tcpstat.tcps_persistdrop++;
518 so_tmp = tp->t_inpcb->inp_socket;
519 tp = tcp_drop(tp, ETIMEDOUT);
520 postevent(so_tmp, 0, EV_TIMEOUT);
521 break;
522 }
523 tcp_setpersist(tp);
524 tp->t_force = 1;
525 (void) tcp_output(tp);
526 tp->t_force = 0;
527 break;
528
529 /*
530 * Keep-alive timer went off; send something
531 * or drop connection if idle for too long.
532 */
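	/*
	 * With the historical BSD defaults (2-hour keepidle, 75-second
	 * keepintvl, keepcnt of 8) tcp_maxidle works out to 10 minutes, so an
	 * unresponsive peer is dropped roughly 2 hours 10 minutes after the
	 * connection last heard from it.
	 */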
533 case TCPT_KEEP:
534 tcpstat.tcps_keeptimeo++;
535 if (tp->t_state < TCPS_ESTABLISHED)
536 goto dropit;
537 if ((always_keepalive ||
538 tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
539 tp->t_state <= TCPS_CLOSING) {
540 if (tp->t_rcvtime >= tcp_keepidle + tcp_maxidle)
541 goto dropit;
542 /*
543 * Send a packet designed to force a response
544 * if the peer is up and reachable:
545 * either an ACK if the connection is still alive,
546 * or an RST if the peer has closed the connection
547 * due to timeout or reboot.
548 * Using sequence number tp->snd_una-1
549 * causes the transmitted zero-length segment
550 * to lie outside the receive window;
551 * by the protocol spec, this requires the
552 * correspondent TCP to respond.
553 */
554 tcpstat.tcps_keepprobe++;
555 t_template = tcp_maketemplate(tp);
556 if (t_template) {
557 tcp_respond(tp, t_template->tt_ipgen,
558 &t_template->tt_t, (struct mbuf *)NULL,
559 tp->rcv_nxt, tp->snd_una - 1, 0);
560 (void) m_free(dtom(t_template));
561 }
562 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
563 } else
564 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
565 break;
566
567 #if TCPDEBUG
568 if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
569 tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
570 PRU_SLOWTIMO);
571 #endif
572 dropit:
573 tcpstat.tcps_keepdrops++;
574 so_tmp = tp->t_inpcb->inp_socket;
575 tp = tcp_drop(tp, ETIMEDOUT);
576 postevent(so_tmp, 0, EV_TIMEOUT);
577 break;
578 }
579 return (tp);
580 }