/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>

#define DBG_FNC_TCP_FAST    NETDBG_CODE(DBG_NETTCP, (5 << 8))
#define DBG_FNC_TCP_SLOW    NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)

/*
 * NOTE - WARNING
 *
 *
 *
 *
 */
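
/*
 * Sysctl handler that converts between the tick values stored in the
 * kernel and the millisecond values exposed to userland: reads scale
 * the stored value by 1000 / hz, writes scale by hz / 1000 and must
 * yield at least one tick.  For example, with hz = 100 a stored value
 * of 200 ticks reads back as 2000 ms.
 */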
static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
    int error, s, tt;

    tt = *(int *)oidp->oid_arg1;
    s = tt * 1000 / hz;

    error = sysctl_handle_int(oidp, &s, 0, req);
    if (error || !req->newptr)
        return (error);

    tt = s * hz / 1000;
    if (tt < 1)
        return (EINVAL);

    *(int *)oidp->oid_arg1 = tt;
    return (0);
}
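
/*
 * TCP timer tunables: stored internally in ticks, but read and set in
 * milliseconds through sysctl_msec_to_ticks() above.
 */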
int tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

static int always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

static int tcp_keepcnt = TCPTV_KEEPCNT;     /* max idle probes */
int tcp_maxpersistidle;                     /* max idle time in persist */
int tcp_maxidle;

struct inpcbhead time_wait_slots[N_TIME_WAIT_SLOTS];
int cur_tw_slot = 0;

u_long *delack_bitmask;

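/*
 * TIME_WAIT connections are parked on a wheel of N_TIME_WAIT_SLOTS lists.
 * add_to_time_wait() moves the PCB from the main tcb list into the slot
 * that is t_timer[TCPT_2MSL] ticks ahead of cur_tw_slot (modulo the wheel
 * size); tcp_slowtimo() then expires one slot per tick.
 */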
void
add_to_time_wait(tp)
    struct tcpcb *tp;
{
    int tw_slot;

    LIST_REMOVE(tp->t_inpcb, inp_list);

    if (tp->t_timer[TCPT_2MSL] == 0)
        tp->t_timer[TCPT_2MSL] = 1;

    tp->t_rcvtime += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);

    tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
    if (tw_slot >= N_TIME_WAIT_SLOTS)
        tw_slot -= N_TIME_WAIT_SLOTS;

    LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
}

/*
 * Fast timeout routine for processing delayed acks
 */
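/*
 * delack_bitmask carries one bit per PCB hash bucket (32 buckets per
 * word); a set bit means a connection hashed to that bucket may have a
 * delayed ACK pending.  The scan below visits only the flagged buckets,
 * converts each pending TF_DELACK into TF_ACKNOW and transmits the ACK.
 */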
void
tcp_fasttimo()
{
    register struct inpcb *inp;
    register struct tcpcb *tp;

    register u_long i, j;
    register u_long temp_mask;
    register u_long elem_base = 0;
    struct inpcbhead *head;
    int s = splnet();

    static int delack_checked = 0;

    KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0, 0, 0, 0, 0);

    if (!tcp_delack_enabled) {
        splx(s);                /* drop splnet() before bailing out */
        return;
    }

    for (i = 0; i < (tcbinfo.hashsize / 32); i++) {
        if (delack_bitmask[i]) {
            temp_mask = 1;
            for (j = 0; j < 32; j++) {
                if (temp_mask & delack_bitmask[i]) {
                    head = &tcbinfo.hashbase[elem_base + j];
                    for (inp = head->lh_first; inp != 0; inp = inp->inp_hash.le_next) {
                        delack_checked++;
                        if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) {
                            tp->t_flags &= ~TF_DELACK;
                            tp->t_flags |= TF_ACKNOW;
                            tcpstat.tcps_delack++;
                            (void) tcp_output(tp);
                        }
                    }
                }
                temp_mask <<= 1;
            }
            delack_bitmask[i] = 0;
        }
        elem_base += 32;
    }
    KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked, tcpstat.tcps_delack, 0, 0, 0);
    splx(s);
}

/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates the timers in all active tcb's and
 * causes finite state machine actions if timers expire.
 */
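/*
 * Besides running the per-connection timers, each call also expires the
 * current time-wait slot, advances cur_tw_slot, and bumps tcp_now, the
 * clock used for RFC 1323 timestamps.
 */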
void
tcp_slowtimo()
{
    register struct inpcb *ip, *ipnxt;
    register struct tcpcb *tp;
    register int i;
    int s;
#if TCPDEBUG
    int ostate;
#endif
#if KDEBUG
    static int tws_checked;
#endif

    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
    s = splnet();

    tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

    ip = tcb.lh_first;
    if (ip == NULL) {
        splx(s);
        return;
    }
    /*
     * Search through tcb's and update active timers.
     */
    for (; ip != NULL; ip = ipnxt) {
        ipnxt = ip->inp_list.le_next;
        tp = intotcpcb(ip);
        if (tp == 0 || tp->t_state == TCPS_LISTEN)
            continue;
        /*
         * Bogus state when port owned by SharedIP with loopback as the
         * only configured interface: BlueBox does not filter loopback
         */
        if (tp->t_state == TCP_NSTATES)
            continue;

        for (i = 0; i < TCPT_NTIMERS; i++) {
            if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
#if TCPDEBUG
                ostate = tp->t_state;
#endif
                tp = tcp_timers(tp, i);
                if (tp == NULL)
                    goto tpgone;
#if TCPDEBUG
                if (tp->t_inpcb->inp_socket->so_options
                    & SO_DEBUG)
                    tcp_trace(TA_USER, ostate, tp,
                        (void *)0,
                        (struct tcphdr *)0,
                        PRU_SLOWTIMO);
#endif
            }
        }
        tp->t_rcvtime++;
        tp->t_starttime++;
        if (tp->t_rtttime)
            tp->t_rtttime++;
tpgone:
        ;
    }

#if KDEBUG
    tws_checked = 0;
#endif
    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked, 0, 0, 0, 0);

    /*
     * Process the items in the current time-wait slot
     */
    for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt) {
#if KDEBUG
        tws_checked++;
#endif
        ipnxt = ip->inp_list.le_next;
        tp = intotcpcb(ip);
        if (tp == NULL) {       /* tp already closed, remove from list */
            LIST_REMOVE(ip, inp_list);
            continue;
        }
        if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
            tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
            tp->t_rcvtime += N_TIME_WAIT_SLOTS;
        }
        else
            tp->t_timer[TCPT_2MSL] = 0;

        if (tp->t_timer[TCPT_2MSL] == 0)
            tp = tcp_timers(tp, TCPT_2MSL);
    }

    if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
        cur_tw_slot = 0;
    tcp_now++;                  /* for timestamps */
    splx(s);
    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot, 0, 0, 0);
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
    struct tcpcb *tp;
{
    register int i;

    for (i = 0; i < TCPT_NTIMERS; i++)
        tp->t_timer[i] = 0;
}

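/*
 * Per-retransmission backoff multipliers: the base RTO is multiplied by
 * tcp_backoff[t_rxtshift] (tcp_syn_backoff while a SYN is outstanding),
 * so the timeout roughly doubles on each successive drop until it
 * saturates at 64.  tcp_totbackoff is the sum of the tcp_backoff entries
 * and is used by the persist timer to bound total probing time.
 */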
int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;    /* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */
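/*
 * Called from tcp_slowtimo() with the timer index that just expired:
 * TCPT_2MSL, TCPT_REXMT, TCPT_PERSIST or TCPT_KEEP.  Returns the tcpcb,
 * or NULL if the connection was dropped or closed.
 */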
struct tcpcb *
tcp_timers(tp, timer)
    register struct tcpcb *tp;
    int timer;
{
    register int rexmt;
    struct socket *so_tmp;
    struct tcptemp *t_template;

#if TCPDEBUG
    int ostate;
#endif

#if INET6
    int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */

    switch (timer) {

    /*
     * 2 MSL timeout in shutdown went off.  If we're closed but
     * still waiting for peer to close and connection has been idle
     * too long, or if 2MSL time is up from TIME_WAIT, delete connection
     * control block.  Otherwise, check again in a bit.
     */
    case TCPT_2MSL:
        if (tp->t_state != TCPS_TIME_WAIT &&
            tp->t_rcvtime <= tcp_maxidle) {
            tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
            add_to_time_wait(tp);
        }
        else
            tp = tcp_close(tp);
        break;

    /*
     * Retransmission timer went off.  Message has not
     * been acked within retransmit interval.  Back off
     * to a longer retransmit interval and retransmit one segment.
     */
    case TCPT_REXMT:
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
            tp->t_rxtshift = TCP_MAXRXTSHIFT;
            tcpstat.tcps_timeoutdrop++;
            so_tmp = tp->t_inpcb->inp_socket;
            tp = tcp_drop(tp, tp->t_softerror ?
                tp->t_softerror : ETIMEDOUT);
            postevent(so_tmp, 0, EV_TIMEOUT);
            break;
        }

        if (tp->t_rxtshift == 1) {
            /*
             * first retransmit; record ssthresh and cwnd so they can
             * be recovered if this turns out to be a "bad" retransmit.
             * A retransmit is considered "bad" if an ACK for this
             * segment is received within RTT/2 interval; the assumption
             * here is that the ACK was already in flight.  See
             * "On Estimating End-to-End Network Path Properties" by
             * Allman and Paxson for more details.
             */
            tp->snd_cwnd_prev = tp->snd_cwnd;
            tp->snd_ssthresh_prev = tp->snd_ssthresh;
            tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
        }
        tcpstat.tcps_rexmttimeo++;
        if (tp->t_state == TCPS_SYN_SENT)
            rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
        else
            rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
        TCPT_RANGESET(tp->t_rxtcur, rexmt,
            tp->t_rttmin, TCPTV_REXMTMAX);
        tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;

        /*
         * Disable rfc1323 and rfc1644 if we haven't got any response to
         * our third SYN to work around some broken terminal servers
         * (most of which have hopefully been retired) that have bad VJ
         * header compression code which trashes TCP segments containing
         * unknown-to-them TCP options.
         */
        if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
            tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
        /*
         * If losing, let the lower level know and try for
         * a better route.  Also, if we backed off this far,
         * our srtt estimate is probably bogus.  Clobber it
         * so we'll take the next rtt measurement as our srtt;
         * move the current srtt into rttvar to keep the current
         * retransmit times until then.
         */
        if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
            if (isipv6)
                in6_losing(tp->t_inpcb);
            else
#endif /* INET6 */
            in_losing(tp->t_inpcb);
            tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
            tp->t_srtt = 0;
        }
        tp->snd_nxt = tp->snd_una;
        /*
         * Note:  We overload snd_recover to function also as the
         * snd_last variable described in RFC 2582
         */
        tp->snd_recover = tp->snd_max;
        /*
         * Force a segment to be sent.
         */
        tp->t_flags |= TF_ACKNOW;
        /*
         * If timing a segment in this window, stop the timer.
         */
        tp->t_rtttime = 0;
        /*
         * Close the congestion window down to one segment
         * (we'll open it by one segment for each ack we get).
         * Since we probably have a window's worth of unacked
         * data accumulated, this "slow start" keeps us from
         * dumping all that data as back-to-back packets (which
         * might overwhelm an intermediate gateway).
         *
         * There are two phases to the opening:  Initially we
         * open by one mss on each ack.  This makes the window
         * size increase exponentially with time.  If the
         * window is larger than the path can handle, this
         * exponential growth results in dropped packet(s)
         * almost immediately.  To get more time between
         * drops but still "push" the network to take advantage
         * of improving conditions, we switch from exponential
         * to linear window opening at some threshold size.
         * For a threshold, we use half the current window
         * size, truncated to a multiple of the mss.
         *
         * (the minimum cwnd that will give us exponential
         * growth is 2 mss.  We don't allow the threshold
         * to go below this.)
         */
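        /*
         * E.g. with a 32 kB in-flight window and a 1460-byte MSS this
         * leaves ssthresh at 11 segments (half the window in whole
         * segments, never below 2) while cwnd restarts at one MSS.
         */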
        {
            u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
            if (win < 2)
                win = 2;
            tp->snd_cwnd = tp->t_maxseg;
            tp->snd_ssthresh = win * tp->t_maxseg;
            tp->t_dupacks = 0;
        }
        (void) tcp_output(tp);
        break;

    /*
     * Persistence timer into zero window.
     * Force a byte to be output, if possible.
     */
    case TCPT_PERSIST:
        tcpstat.tcps_persisttimeo++;
        /*
         * Hack: if the peer is dead/unreachable, we do not
         * time out if the window is closed.  After a full
         * backoff, drop the connection if the idle time
         * (no responses to probes) reaches the maximum
         * backoff that we would use if retransmitting.
         */
        if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
            (tp->t_rcvtime >= tcp_maxpersistidle ||
             tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
            tcpstat.tcps_persistdrop++;
            so_tmp = tp->t_inpcb->inp_socket;
            tp = tcp_drop(tp, ETIMEDOUT);
            postevent(so_tmp, 0, EV_TIMEOUT);
            break;
        }
        tcp_setpersist(tp);
        tp->t_force = 1;
        (void) tcp_output(tp);
        tp->t_force = 0;
        break;

    /*
     * Keep-alive timer went off; send something
     * or drop connection if idle for too long.
     */
    case TCPT_KEEP:
        tcpstat.tcps_keeptimeo++;
        if (tp->t_state < TCPS_ESTABLISHED)
            goto dropit;
        if ((always_keepalive ||
            tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
            tp->t_state <= TCPS_CLOSING) {
            if (tp->t_rcvtime >= TCP_KEEPIDLE(tp) + tcp_maxidle)
                goto dropit;
            /*
             * Send a packet designed to force a response
             * if the peer is up and reachable:
             * either an ACK if the connection is still alive,
             * or an RST if the peer has closed the connection
             * due to timeout or reboot.
             * Using sequence number tp->snd_una-1
             * causes the transmitted zero-length segment
             * to lie outside the receive window;
             * by the protocol spec, this requires the
             * correspondent TCP to respond.
             */
            tcpstat.tcps_keepprobe++;
            t_template = tcp_maketemplate(tp);
            if (t_template) {
                tcp_respond(tp, t_template->tt_ipgen,
                    &t_template->tt_t, (struct mbuf *)NULL,
                    tp->rcv_nxt, tp->snd_una - 1, 0);
                (void) m_free(dtom(t_template));
            }
            tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
        } else
            tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp);
        break;

#if TCPDEBUG
        if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
            tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
                PRU_SLOWTIMO);
#endif
    dropit:
        tcpstat.tcps_keepdrops++;
        so_tmp = tp->t_inpcb->inp_socket;
        tp = tcp_drop(tp, ETIMEDOUT);
        postevent(so_tmp, 0, EV_TIMEOUT);
        break;
    }
    return (tp);
}