/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>

#define DBG_FNC_TCP_FAST    NETDBG_CODE(DBG_NETTCP, (5 << 8))
#define DBG_FNC_TCP_SLOW    NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)

/*
 * NOTE - WARNING
 *
 *
 *
 *
 */
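/*
 * sysctl_msec_to_ticks converts between the millisecond values exposed
 * through sysctl and the tick counts the timers use internally: the stored
 * tick value is reported as ticks * 1000 / hz, and a newly written value
 * is converted back as ms * hz / 1000.  A value that would round down to
 * less than one tick is rejected with EINVAL.  (For example, assuming
 * hz = 100, writing 12000 ms stores 1200 ticks.)
 */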
static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
    int error, s, tt;

    tt = *(int *)oidp->oid_arg1;
    s = tt * 1000 / hz;

    error = sysctl_handle_int(oidp, &s, 0, req);
    if (error || !req->newptr)
        return (error);

    tt = s * hz / 1000;
    if (tt < 1)
        return (EINVAL);

    *(int *)oidp->oid_arg1 = tt;
    return (0);
}

int tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

static int always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

static int tcp_keepcnt = TCPTV_KEEPCNT;     /* max idle probes */
int tcp_maxpersistidle;                     /* max idle time in persist */
int tcp_maxidle;

struct inpcbhead time_wait_slots[N_TIME_WAIT_SLOTS];
int cur_tw_slot = 0;

u_long *delack_bitmask;

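/*
 * TIME_WAIT connections are kept on a timer wheel of N_TIME_WAIT_SLOTS
 * lists; tcp_slowtimo() advances cur_tw_slot by one each tick and expires
 * the slot it reaches.  add_to_time_wait() moves a pcb off the main tcb
 * list and into the slot the wheel will reach in (2MSL timer mod
 * N_TIME_WAIT_SLOTS) ticks; any whole revolutions still remaining are left
 * in t_timer[TCPT_2MSL] and worked off one revolution at a time by
 * tcp_slowtimo().
 */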
void add_to_time_wait(tp)
    struct tcpcb *tp;
{
    int tw_slot;

    LIST_REMOVE(tp->t_inpcb, inp_list);

    if (tp->t_timer[TCPT_2MSL] == 0)
        tp->t_timer[TCPT_2MSL] = 1;

    tp->t_rcvtime += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
    tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
    if (tw_slot >= N_TIME_WAIT_SLOTS)
        tw_slot -= N_TIME_WAIT_SLOTS;

    LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
}


/*
 * Fast timeout routine for processing delayed acks
 */
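/*
 * Instead of scanning every connection, tcp_fasttimo() consults
 * delack_bitmask: bit j of word i covers PCB hash bucket (32 * i + j), and
 * a set bit means a delayed ACK may be pending somewhere in that bucket.
 * Only flagged buckets are walked; any tcpcb found with TF_DELACK set has
 * it converted to TF_ACKNOW and an ACK is sent immediately, after which
 * the bitmask word is cleared.
 */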
void
tcp_fasttimo()
{
    register struct inpcb *inp;
    register struct tcpcb *tp;

    register u_long i,j;
    register u_long temp_mask;
    register u_long elem_base = 0;
    struct inpcbhead *head;
    int s = splnet();

    static int delack_checked = 0;

    KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0);

    if (!tcp_delack_enabled) {
        splx(s);
        return;
    }

    for (i=0; i < (tcbinfo.hashsize / 32); i++) {
        if (delack_bitmask[i]) {
            temp_mask = 1;
            for (j=0; j < 32; j++) {
                if (temp_mask & delack_bitmask[i]) {
                    head = &tcbinfo.hashbase[elem_base + j];
                    for (inp=head->lh_first; inp != 0; inp = inp->inp_hash.le_next) {
                        delack_checked++;
                        if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
                            tp->t_flags &= ~TF_DELACK;
                            tp->t_flags |= TF_ACKNOW;
                            tcpstat.tcps_delack++;
                            (void) tcp_output(tp);
                        }
                    }
                }
                temp_mask <<= 1;
            }
            delack_bitmask[i] = 0;
        }
        elem_base += 32;
    }
    KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked,tcpstat.tcps_delack,0,0,0);
    splx(s);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates the timers in all active tcb's and
 * causes finite state machine actions if timers expire.
 */
void
tcp_slowtimo()
{
    register struct inpcb *ip, *ipnxt;
    register struct tcpcb *tp;
    register int i;
    int s;
#if TCPDEBUG
    int ostate;
#endif
#if KDEBUG
    static int tws_checked;
#endif

    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
    s = splnet();

    tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

    ip = tcb.lh_first;
    if (ip == NULL) {
        splx(s);
        return;
    }
    /*
     * Search through tcb's and update active timers.
     */
    for (; ip != NULL; ip = ipnxt) {
        ipnxt = ip->inp_list.le_next;
        tp = intotcpcb(ip);
        if (tp == 0 || tp->t_state == TCPS_LISTEN)
            continue;
        /*
         * Bogus state when port owned by SharedIP with loopback as the
         * only configured interface: BlueBox does not filter loopback
         */
        if (tp->t_state == TCP_NSTATES)
            continue;

        for (i = 0; i < TCPT_NTIMERS; i++) {
            if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
#if TCPDEBUG
                ostate = tp->t_state;
#endif
                tp = tcp_timers(tp, i);
                if (tp == NULL)
                    goto tpgone;
#if TCPDEBUG
                if (tp->t_inpcb->inp_socket->so_options
                    & SO_DEBUG)
                    tcp_trace(TA_USER, ostate, tp,
                        (void *)0,
                        (struct tcphdr *)0,
                        PRU_SLOWTIMO);
#endif
            }
        }
        tp->t_rcvtime++;
        tp->t_starttime++;
        if (tp->t_rtttime)
            tp->t_rtttime++;
tpgone:
        ;
    }

#if KDEBUG
    tws_checked = 0;
#endif
    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked,0,0,0,0);

    /*
     * Process the items in the current time-wait slot
     */

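    /*
     * Each pcb found in this slot either expires now (its 2MSL timer fits
     * within one wheel revolution) or has the timer reduced by
     * N_TIME_WAIT_SLOTS and is left in place, to be looked at again on the
     * next revolution of the wheel.
     */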
    for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt)
    {
#if KDEBUG
        tws_checked++;
#endif
        ipnxt = ip->inp_list.le_next;
        tp = intotcpcb(ip);
        if (tp == NULL) {   /* tp already closed, remove from list */
            LIST_REMOVE(ip, inp_list);
            continue;
        }
        if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
            tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
            tp->t_rcvtime += N_TIME_WAIT_SLOTS;
        }
        else
            tp->t_timer[TCPT_2MSL] = 0;

        if (tp->t_timer[TCPT_2MSL] == 0)
            tp = tcp_timers(tp, TCPT_2MSL);
    }

    if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
        cur_tw_slot = 0;
    tcp_now++;  /* for timestamps */
    splx(s);
    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
    struct tcpcb *tp;
{
    register int i;

    for (i = 0; i < TCPT_NTIMERS; i++)
        tp->t_timer[i] = 0;
}

int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;    /* sum of tcp_backoff[] */
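    /* (1 + 2 + 4 + 8 + 16 + 32) + 7 * 64 = 63 + 448 = 511 */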

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(tp, timer)
    register struct tcpcb *tp;
    int timer;
{
    register int rexmt;
    struct socket *so_tmp;
    struct tcptemp *t_template;

#if TCPDEBUG
    int ostate;
#endif

#if INET6
    int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */

    switch (timer) {

    /*
     * 2 MSL timeout in shutdown went off.  If we're closed but
     * still waiting for peer to close and connection has been idle
     * too long, or if 2MSL time is up from TIME_WAIT, delete connection
     * control block.  Otherwise, check again in a bit.
     */
    case TCPT_2MSL:
        if (tp->t_state != TCPS_TIME_WAIT &&
            tp->t_rcvtime <= tcp_maxidle) {
            tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
            add_to_time_wait(tp);
        }
        else
            tp = tcp_close(tp);
        break;

    /*
     * Retransmission timer went off.  Message has not
     * been acked within retransmit interval.  Back off
     * to a longer retransmit interval and retransmit one segment.
     */
    case TCPT_REXMT:
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
            tp->t_rxtshift = TCP_MAXRXTSHIFT;
            tcpstat.tcps_timeoutdrop++;
            so_tmp = tp->t_inpcb->inp_socket;
            tp = tcp_drop(tp, tp->t_softerror ?
                tp->t_softerror : ETIMEDOUT);
            postevent(so_tmp, 0, EV_TIMEOUT);
            break;
        }

        if (tp->t_rxtshift == 1) {
            /*
             * first retransmit; record ssthresh and cwnd so they can
             * be recovered if this turns out to be a "bad" retransmit.
             * A retransmit is considered "bad" if an ACK for this
             * segment is received within RTT/2 interval; the assumption
             * here is that the ACK was already in flight.  See
             * "On Estimating End-to-End Network Path Properties" by
             * Allman and Paxson for more details.
             */
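            /*
             * Note: t_srtt is kept scaled (by TCP_RTT_SHIFT bits of
             * fraction), so shifting it right by TCP_RTT_SHIFT + 1 below
             * yields roughly srtt/2 in timer ticks, matching the RTT/2
             * window described above.
             */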
            tp->snd_cwnd_prev = tp->snd_cwnd;
            tp->snd_ssthresh_prev = tp->snd_ssthresh;
            tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
        }
        tcpstat.tcps_rexmttimeo++;
        if (tp->t_state == TCPS_SYN_SENT)
            rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
        else
            rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
        TCPT_RANGESET(tp->t_rxtcur, rexmt,
            tp->t_rttmin, TCPTV_REXMTMAX);
        tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
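        /*
         * For illustration (assuming TCP_REXMTVAL(tp) currently works out
         * to about 1 second): successive timeouts back off to roughly
         * 2, 4, 8, ... seconds until the tcp_backoff table pins the
         * multiplier at 64; TCPT_RANGESET then clamps the result between
         * t_rttmin and TCPTV_REXMTMAX.
         */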

        /*
         * Disable rfc1323 and rfc1644 if we haven't got any response to
         * our third SYN to work around some broken terminal servers
         * (most of which have hopefully been retired) that have bad VJ
         * header compression code which trashes TCP segments containing
         * unknown-to-them TCP options.
         */
        if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
            tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
        /*
         * If losing, let the lower level know and try for
         * a better route.  Also, if we backed off this far,
         * our srtt estimate is probably bogus.  Clobber it
         * so we'll take the next rtt measurement as our srtt;
         * move the current srtt into rttvar to keep the current
         * retransmit times until then.
         */
        if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
            if (isipv6)
                in6_losing(tp->t_inpcb);
            else
#endif /* INET6 */
            in_losing(tp->t_inpcb);
            tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
            tp->t_srtt = 0;
        }
        tp->snd_nxt = tp->snd_una;
        /*
         * Note:  We overload snd_recover to function also as the
         * snd_last variable described in RFC 2582
         */
        tp->snd_recover = tp->snd_max;
        /*
         * Force a segment to be sent.
         */
        tp->t_flags |= TF_ACKNOW;
        /*
         * If timing a segment in this window, stop the timer.
         */
        tp->t_rtttime = 0;
        /*
         * Close the congestion window down to one segment
         * (we'll open it by one segment for each ack we get).
         * Since we probably have a window's worth of unacked
         * data accumulated, this "slow start" keeps us from
         * dumping all that data as back-to-back packets (which
         * might overwhelm an intermediate gateway).
         *
         * There are two phases to the opening:  Initially we
         * open by one mss on each ack.  This makes the window
         * size increase exponentially with time.  If the
         * window is larger than the path can handle, this
         * exponential growth results in dropped packet(s)
         * almost immediately.  To get more time between
         * drops but still "push" the network to take advantage
         * of improving conditions, we switch from exponential
         * to linear window opening at some threshold size.
         * For a threshold, we use half the current window
         * size, truncated to a multiple of the mss.
         *
         * (the minimum cwnd that will give us exponential
         * growth is 2 mss.  We don't allow the threshold
         * to go below this.)
         */
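        /*
         * For example (values assumed purely for illustration): with
         * snd_wnd = snd_cwnd = 65535 and t_maxseg = 1460, win is
         * 65535 / 2 / 1460 = 22, so ssthresh becomes 22 * 1460 = 32120
         * bytes while cwnd restarts at a single segment (1460 bytes).
         */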
        {
            u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
            if (win < 2)
                win = 2;
            tp->snd_cwnd = tp->t_maxseg;
            tp->snd_ssthresh = win * tp->t_maxseg;
            tp->t_dupacks = 0;
        }
        (void) tcp_output(tp);
        break;

    /*
     * Persistence timer into zero window.
     * Force a byte to be output, if possible.
     */
    case TCPT_PERSIST:
        tcpstat.tcps_persisttimeo++;
        /*
         * Hack: if the peer is dead/unreachable, we do not
         * time out if the window is closed.  After a full
         * backoff, drop the connection if the idle time
         * (no responses to probes) reaches the maximum
         * backoff that we would use if retransmitting.
         */
        if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
            (tp->t_rcvtime >= tcp_maxpersistidle ||
            tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
            tcpstat.tcps_persistdrop++;
            so_tmp = tp->t_inpcb->inp_socket;
            tp = tcp_drop(tp, ETIMEDOUT);
            postevent(so_tmp, 0, EV_TIMEOUT);
            break;
        }
        tcp_setpersist(tp);
        tp->t_force = 1;
        (void) tcp_output(tp);
        tp->t_force = 0;
        break;

    /*
     * Keep-alive timer went off; send something
     * or drop connection if idle for too long.
     */
    case TCPT_KEEP:
        tcpstat.tcps_keeptimeo++;
        if (tp->t_state < TCPS_ESTABLISHED)
            goto dropit;
        if ((always_keepalive ||
            tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
            tp->t_state <= TCPS_CLOSING) {
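            /*
             * The connection is dropped once it has been idle for
             * TCP_KEEPIDLE(tp) plus tcp_maxidle, i.e. the keep-idle time
             * plus tcp_keepcnt probe intervals (tcp_maxidle is recomputed
             * as tcp_keepcnt * tcp_keepintvl in tcp_slowtimo()).
             */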
            if (tp->t_rcvtime >= TCP_KEEPIDLE(tp) + tcp_maxidle)
                goto dropit;
            /*
             * Send a packet designed to force a response
             * if the peer is up and reachable:
             * either an ACK if the connection is still alive,
             * or an RST if the peer has closed the connection
             * due to timeout or reboot.
             * Using sequence number tp->snd_una-1
             * causes the transmitted zero-length segment
             * to lie outside the receive window;
             * by the protocol spec, this requires the
             * correspondent TCP to respond.
             */
            tcpstat.tcps_keepprobe++;
            t_template = tcp_maketemplate(tp);
            if (t_template) {
                tcp_respond(tp, t_template->tt_ipgen,
                    &t_template->tt_t, (struct mbuf *)NULL,
                    tp->rcv_nxt, tp->snd_una - 1, 0);
                (void) m_free(dtom(t_template));
            }
            tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
        } else
            tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp);
        break;

#if TCPDEBUG
        if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
            tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
                PRU_SLOWTIMO);
#endif
    dropit:
        tcpstat.tcps_keepdrops++;
        so_tmp = tp->t_inpcb->inp_socket;
        tp = tcp_drop(tp, ETIMEDOUT);
        postevent(so_tmp, 0, EV_TIMEOUT);
        break;
    }
    return (tp);
}