/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#if ISFB31
#include "opt_compat.h"
#include "opt_tcpdebug.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>

#define DBG_FNC_TCP_FAST	NETDBG_CODE(DBG_NETTCP, (5 << 8))
#define DBG_FNC_TCP_SLOW	NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)

int	tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
	CTLFLAG_RW, &tcp_keepinit, 0, "");

int	tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
	CTLFLAG_RW, &tcp_keepidle, 0, "");

static int	tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_INT(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
	CTLFLAG_RW, &tcp_keepintvl, 0, "");

static int	always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive,
	CTLFLAG_RW, &always_keepalive, 0, "");

static int	tcp_keepcnt = TCPTV_KEEPCNT;		/* max idle probes */
static int	tcp_maxpersistidle = TCPTV_KEEP_IDLE;	/* max idle time in persist */
int	tcp_maxidle;

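/*
 * TIME_WAIT connections are kept on a timer wheel: each slot holds the
 * PCBs whose 2MSL timer maps to that tick, and cur_tw_slot is the slot
 * the slow timer will service next.
 */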
struct inpcbhead	time_wait_slots[N_TIME_WAIT_SLOTS];
int		cur_tw_slot = 0;

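/*
 * delack_bitmask maps one bit to each PCB hash bucket; tcp_fasttimo()
 * uses it to visit only buckets that may hold a pending delayed ACK
 * once the active connection count exceeds DELACK_BITMASK_THRESH.
 */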
u_long		*delack_bitmask;
u_long		current_active_connections = 0;
u_long		last_active_conn_count = 0;

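/*
 * Queue a connection entering its 2MSL wait on the timer wheel: the PCB
 * is moved off the main tcb list and into the slot at cur_tw_slot plus
 * the remaining 2MSL ticks, modulo N_TIME_WAIT_SLOTS.
 */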
void
add_to_time_wait(tp)
	struct tcpcb *tp;
{
	int tw_slot;

	LIST_REMOVE(tp->t_inpcb, inp_list);

	if (tp->t_timer[TCPT_2MSL] == 0)
		tp->t_timer[TCPT_2MSL] = 1;

	tp->t_idle += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
	tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
	if (tw_slot >= N_TIME_WAIT_SLOTS)
		tw_slot -= N_TIME_WAIT_SLOTS;

	LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
}

/*
 * Fast timeout routine for processing delayed acks
 */
void
tcp_fasttimo()
{
	register struct inpcb *inp;
	register struct tcpcb *tp;

	register u_long i, j;
	register u_long temp_mask;
	register u_long elem_base = 0;
	struct inpcbhead *head;
	int s = splnet();

	static int delack_checked = 0;

	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0);

	if (!tcp_delack_enabled) {
		splx(s);	/* restore interrupt level before the early return */
		return;
	}

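	/*
	 * Two scan strategies: once the number of active connections has
	 * stayed above DELACK_BITMASK_THRESH, walk only the PCB hash
	 * buckets whose bit is set in delack_bitmask; otherwise fall back
	 * to a linear walk of the whole tcb list.
	 */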
	if ((current_active_connections > DELACK_BITMASK_THRESH) &&
	    (last_active_conn_count > DELACK_BITMASK_THRESH)) {
		for (i = 0; i < (tcbinfo.hashsize / 32); i++) {
			if (delack_bitmask[i]) {
				temp_mask = 1;
				for (j = 0; j < 32; j++) {
					if (temp_mask & delack_bitmask[i]) {
						head = &tcbinfo.hashbase[elem_base + j];
						for (inp = head->lh_first; inp != 0; inp = inp->inp_hash.le_next) {
							delack_checked++;
							if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
								tp->t_flags &= ~TF_DELACK;
								tp->t_flags |= TF_ACKNOW;
								tcpstat.tcps_delack++;
								(void) tcp_output(tp);
							}
						}
					}
					temp_mask <<= 1;
				}
				delack_bitmask[i] = 0;
			}
			elem_base += 32;
		}
	}
	else {
		for (inp = tcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) {
			if ((tp = (struct tcpcb *)inp->inp_ppcb) &&
			    (tp->t_flags & TF_DELACK)) {
				tp->t_flags &= ~TF_DELACK;
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_delack++;
				(void) tcp_output(tp);
			}
		}
	}

	last_active_conn_count = current_active_connections;
	KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked, tcpstat.tcps_delack, 0,0,0);
	splx(s);
}

/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates the timers in all active tcb's and
 * causes finite state machine actions if timers expire.
 */
void
tcp_slowtimo()
{
	register struct inpcb *ip, *ipnxt;
	register struct tcpcb *tp;
	register int i;
	int s;
#if TCPDEBUG
	int ostate;
#endif
#if KDEBUG
	static int tws_checked;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
	s = splnet();

	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

	ip = tcb.lh_first;
	if (ip == NULL) {
		splx(s);
		return;
	}
	/*
	 * Search through tcb's and update active timers.
	 */
	for (; ip != NULL; ip = ipnxt) {
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		if (tp == 0 || tp->t_state == TCPS_LISTEN)
			continue;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
#if TCPDEBUG
				ostate = tp->t_state;
#endif
				tp = tcp_timers(tp, i);
				if (tp == NULL)
					goto tpgone;
#if TCPDEBUG
				if (tp->t_inpcb->inp_socket->so_options
				    & SO_DEBUG)
					tcp_trace(TA_USER, ostate, tp,
					    (void *)0,
					    (struct tcphdr *)0,
					    PRU_SLOWTIMO);
#endif
			}
		}
		tp->t_idle++;
		tp->t_duration++;
		if (tp->t_rtt)
			tp->t_rtt++;
	tpgone:
		;
	}

#if KDEBUG
	tws_checked = 0;
#endif
	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked, 0,0,0,0);

	/*
	 * Process the items in the current time-wait slot
	 */

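	/*
	 * Entries with more than a full wheel revolution left on their
	 * 2MSL timer have it reduced by N_TIME_WAIT_SLOTS and stay queued
	 * for the next pass; the rest are handed to tcp_timers() to
	 * finish the 2MSL wait.
	 */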
	for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt) {
#if KDEBUG
		tws_checked++;
#endif
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
			tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
			tp->t_idle += N_TIME_WAIT_SLOTS;
		}
		else
			tp->t_timer[TCPT_2MSL] = 0;

		if (tp->t_timer[TCPT_2MSL] == 0)
			tp = tcp_timers(tp, TCPT_2MSL);
	}

	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
		cur_tw_slot = 0;

#if TCP_COMPAT_42
	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;	/* increment iss */
	if ((int)tcp_iss < 0)
		tcp_iss = TCP_ISSINCR;		/* XXX */
#endif
	tcp_now++;				/* for timestamps */
	splx(s);
	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot, 0,0,0);
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
	struct tcpcb *tp;
{
	register int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		tp->t_timer[i] = 0;
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(tp, timer)
	register struct tcpcb *tp;
	int timer;
{
	register int rexmt;
	struct socket *so_tmp;
#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */

	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off. If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
	 * control block. Otherwise, check again in a bit.
	 */
	case TCPT_2MSL:
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_idle <= tcp_maxidle) {
			tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
			add_to_time_wait(tp);
		}
		else
			tp = tcp_close(tp);
		break;

	/*
	 * Retransmission timer went off. Message has not
	 * been acked within retransmit interval. Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			tcpstat.tcps_timeoutdrop++;
			so_tmp = tp->t_inpcb->inp_socket;
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);
			postevent(so_tmp, 0, EV_TIMEOUT);
			break;
		}
		tcpstat.tcps_rexmttimeo++;
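		/*
		 * Exponential backoff: scale the smoothed RTO by
		 * tcp_backoff[t_rxtshift] and clamp the result to
		 * [t_rttmin, TCPTV_REXMTMAX] via TCPT_RANGESET.
		 */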
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		TCPT_RANGESET(tp->t_rxtcur, rexmt,
		    tp->t_rttmin, TCPTV_REXMTMAX);
		tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
		/*
		 * If losing, let the lower level know and try for
		 * a better route. Also, if we backed off this far,
		 * our srtt estimate is probably bogus. Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
			if (isipv6)
				in6_losing(tp->t_inpcb);
			else
#endif /* INET6 */
			in_losing(tp->t_inpcb);
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;
		/*
		 * If timing a segment in this window, stop the timer.
		 */
		tp->t_rtt = 0;
		/*
		 * Close the congestion window down to one segment
		 * (we'll open it by one segment for each ack we get).
		 * Since we probably have a window's worth of unacked
		 * data accumulated, this "slow start" keeps us from
		 * dumping all that data as back-to-back packets (which
		 * might overwhelm an intermediate gateway).
		 *
		 * There are two phases to the opening: Initially we
		 * open by one mss on each ack. This makes the window
		 * size increase exponentially with time. If the
		 * window is larger than the path can handle, this
		 * exponential growth results in dropped packet(s)
		 * almost immediately. To get more time between
		 * drops but still "push" the network to take advantage
		 * of improving conditions, we switch from exponential
		 * to linear window opening at some threshold size.
		 * For a threshold, we use half the current window
		 * size, truncated to a multiple of the mss.
		 *
		 * (the minimum cwnd that will give us exponential
		 * growth is 2 mss. We don't allow the threshold
		 * to go below this.)
		 */
		{
			u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
			if (win < 2)
				win = 2;
			tp->snd_cwnd = tp->t_maxseg;
			tp->snd_ssthresh = win * tp->t_maxseg;
			tp->t_dupacks = 0;
		}
		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed. After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 */
		if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (tp->t_idle >= tcp_maxpersistidle ||
		    tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
			tcpstat.tcps_persistdrop++;
			so_tmp = tp->t_inpcb->inp_socket;
			tp = tcp_drop(tp, ETIMEDOUT);
			postevent(so_tmp, 0, EV_TIMEOUT);
			break;
		}
		tcp_setpersist(tp);
		tp->t_force = 1;
		(void) tcp_output(tp);
		tp->t_force = 0;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		tcpstat.tcps_keeptimeo++;
		if (tp->t_state < TCPS_ESTABLISHED)
			goto dropit;
		if ((always_keepalive ||
		    tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
		    tp->t_state <= TCPS_CLOSING) {
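			/*
			 * Drop the connection once it has been idle for
			 * tcp_keepidle plus tcp_maxidle (tcp_keepcnt
			 * keepalive intervals) with no response to probes.
			 */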
			if (tp->t_idle >= tcp_keepidle + tcp_maxidle)
				goto dropit;
			/*
			 * Send a packet designed to force a response
			 * if the peer is up and reachable:
			 * either an ACK if the connection is still alive,
			 * or an RST if the peer has closed the connection
			 * due to timeout or reboot.
			 * Using sequence number tp->snd_una-1
			 * causes the transmitted zero-length segment
			 * to lie outside the receive window;
			 * by the protocol spec, this requires the
			 * correspondent TCP to respond.
			 */
			tcpstat.tcps_keepprobe++;
#if TCP_COMPAT_42
			/*
			 * The keepalive packet must have nonzero length
			 * to get a 4.2 host to respond.
			 */
#if INET6
			if (isipv6)
				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
				    &tp->t_template->tt_t,
				    (struct mbuf *)NULL,
				    tp->rcv_nxt - 1, tp->snd_una - 1,
				    0, isipv6);
			else
#endif /* INET6 */
			tcp_respond(tp, (void *)&tp->t_template->tt_i,
			    &tp->t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt - 1, tp->snd_una - 1, 0,
			    isipv6);
#else
#if INET6
			if (isipv6)
				tcp_respond(tp, (void *)&tp->t_template->tt_i6,
				    &tp->t_template->tt_t,
				    (struct mbuf *)NULL, tp->rcv_nxt,
				    tp->snd_una - 1, 0, isipv6);
			else
#endif /* INET6 */
			tcp_respond(tp, (void *)&tp->t_template->tt_i,
			    &tp->t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0, isipv6);
#endif
			tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
		} else
			tp->t_timer[TCPT_KEEP] = tcp_keepidle;
		break;
	dropit:
		tcpstat.tcps_keepdrops++;
		so_tmp = tp->t_inpcb->inp_socket;
		tp = tcp_drop(tp, ETIMEDOUT);
		postevent(so_tmp, 0, EV_TIMEOUT);
		break;
	}
	return (tp);
}