/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mcache.h>
#include <sys/queue.h>
#include <kern/locks.h>
#include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */
#include <mach/boolean.h>

#include <net/route.h>
#include <net/if_var.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <sys/kdebug.h>
#include <mach/sdt.h>
#include <netinet/mptcp_var.h>

#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))

#define VERIFY_NEXT_LINK(elm,field) do {			\
	if (LIST_NEXT((elm),field) != NULL &&			\
	    LIST_NEXT((elm),field)->field.le_prev !=		\
	    &((elm)->field.le_next))				\
		panic("Bad link elm %p next->prev != elm", (elm)); \
} while (0)

#define VERIFY_PREV_LINK(elm,field) do {			\
	if (*(elm)->field.le_prev != (elm))			\
		panic("Bad link elm %p prev->next != elm", (elm)); \
} while (0)

#define TCP_SET_TIMER_MODE(mode, i) do {		\
	if (IS_TIMER_HZ_10MS(i))			\
		(mode) |= TCP_TIMERLIST_10MS_MODE;	\
	else if (IS_TIMER_HZ_100MS(i))			\
		(mode) |= TCP_TIMERLIST_100MS_MODE;	\
	else						\
		(mode) |= TCP_TIMERLIST_500MS_MODE;	\
} while (0)

/* Max number of times a stretch ack can be delayed on a connection */
#define TCP_STRETCHACK_DELAY_THRESHOLD	5

/*
 * If the host processor has been sleeping for too long, this is the threshold
 * used to avoid sending stale retransmissions.
 */
#define TCP_SLEEP_TOO_LONG	(10 * 60 * 1000) /* 10 minutes in ms */

/* tcp timer list */
struct tcptimerlist tcp_timer_list;

/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
struct tcptailq tcp_tw_tailq;

static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, s, tt;

	tt = *(int *)oidp->oid_arg1;
	s = tt * 1000 / TCP_RETRANSHZ;

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	tt = s * TCP_RETRANSHZ / 1000;
	if (tt < 1)
		return (EINVAL);

	*(int *)oidp->oid_arg1 = tt;
	return (0);
}
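
/*
 * Illustrative example (not part of the original source): these sysctls
 * store ticks of TCP_RETRANSHZ internally but are exposed to userspace
 * in milliseconds. If TCP_RETRANSHZ were 100 ticks/sec, writing 250 (ms)
 * would store tt = 250 * 100 / 1000 = 25 ticks, and reading back would
 * report s = 25 * 1000 / 100 = 250 ms. With TCP_RETRANSHZ at 1000,
 * ticks and milliseconds coincide and the conversion is the identity.
 */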

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int	tcp_keepcnt;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepcnt, 0, "number of times to repeat keepalive");

int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

/*
 * Avoid DoS via TCP Robustness in Persist Condition
 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
 * by allowing a system-wide maximum persistence timeout value when in
 * Zero Window Probe mode.
 *
 * Expressed in milliseconds to be consistent with other timeout-related
 * values; the TCP socket option is in seconds.
 */
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
    "Maximum persistence timeout for ZWP");

static int always_keepalive = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

/*
 * This parameter determines how long the timer list will stay in fast or
 * quick mode even though all connections are idle. In this state, the
 * timer will run more frequently anticipating new data.
 */
int timer_fastmode_idlemax = TCP_FASTMODE_IDLERUN_MAX;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &timer_fastmode_idlemax, 0, "Maximum idle generations in fast mode");

/*
 * See tcp_syn_backoff[] for interval values between SYN retransmits;
 * the value set below defines the number of retransmits before we
 * disable the timestamp and window scaling options during subsequent
 * SYN retransmits. Setting it to 0 disables the dropping off of those
 * two options.
 */
static int tcp_broken_peer_syn_rxmit_thres = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rexmit_thres,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_broken_peer_syn_rxmit_thres, 0,
    "Number of retransmitted SYNs before disabling RFC 1323 "
    "options on local connections");

static int tcp_timer_advanced = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
    "Number of times one of the timers was advanced");

static int tcp_resched_timerlist = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
    "Number of times timer list was rescheduled as part of processing a packet");

int tcp_pmtud_black_hole_detect = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_detect, 0,
    "Path MTU Discovery Black Hole Detection");

int tcp_pmtud_black_hole_mss = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_mss, 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");

#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */
int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
    "Report stats interval");
#endif /* (DEVELOPMENT || DEBUG) */

/* Set if garbage collection of "used" sockets was performed in the last run */
static boolean_t tcp_gc_done = FALSE;

/* max idle probes */
int	tcp_maxpersistidle;

/*
 * TCP delack timer is set to 100 ms. Since the processing of timer list
 * in fast mode will happen no faster than 100 ms, the delayed ack timer
 * will fire somewhere between 100 and 200 ms.
 */
int	tcp_delack = TCP_RETRANSHZ / 10;

#if MPTCP
/*
 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
 */
int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
#endif /* MPTCP */

static boolean_t tcp_itimer_done = FALSE;

static void tcp_remove_timer(struct tcpcb *tp);
static void tcp_sched_timerlist(uint32_t offset);
static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
    u_int16_t probe_if_index);
static void tcp_sched_timers(struct tcpcb *tp);
static inline void tcp_set_lotimer_index(struct tcpcb *);
__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
__private_extern__ void tcp_report_stats(void);

/*
 * Compare two timers. If there is a reset of the sign bit,
 * it is safe to assume that the timer has wrapped around. By doing
 * signed comparison, we take care of wrap around such that the value
 * with the sign bit reset is actually ahead of the other.
 */
inline int32_t
timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
{
	return (int32_t)((t1 + toff1) - (t2 + toff2));
}
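
/*
 * Illustrative example of the wraparound behavior (not part of the
 * original source): with the clock about to wrap, say t1 = 0xfffffff0
 * and t2 = 0x00000010, the unsigned subtraction yields 0xffffffe0 and
 * the signed cast gives -32; t1 is correctly treated as 32 ticks behind
 * t2 even though t1 > t2 as an unsigned value.
 */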

static u_int64_t tcp_last_report_time;

/*
 * Structure to store previously reported stats so that we can send
 * incremental changes in each report interval.
 */
struct tcp_last_report_stats {
	u_int32_t	tcps_connattempt;
	u_int32_t	tcps_accepts;
	u_int32_t	tcps_ecn_client_setup;
	u_int32_t	tcps_ecn_server_setup;
	u_int32_t	tcps_ecn_client_success;
	u_int32_t	tcps_ecn_server_success;
	u_int32_t	tcps_ecn_not_supported;
	u_int32_t	tcps_ecn_lost_syn;
	u_int32_t	tcps_ecn_lost_synack;
	u_int32_t	tcps_ecn_recv_ce;
	u_int32_t	tcps_ecn_recv_ece;
	u_int32_t	tcps_ecn_sent_ece;
	u_int32_t	tcps_ecn_conn_recv_ce;
	u_int32_t	tcps_ecn_conn_recv_ece;
	u_int32_t	tcps_ecn_conn_plnoce;
	u_int32_t	tcps_ecn_conn_pl_ce;
	u_int32_t	tcps_ecn_conn_nopl_ce;
	u_int32_t	tcps_ecn_fallback_synloss;
	u_int32_t	tcps_ecn_fallback_reorder;
	u_int32_t	tcps_ecn_fallback_ce;

	/* TFO-related statistics */
	u_int32_t	tcps_tfo_syn_data_rcv;
	u_int32_t	tcps_tfo_cookie_req_rcv;
	u_int32_t	tcps_tfo_cookie_sent;
	u_int32_t	tcps_tfo_cookie_invalid;
	u_int32_t	tcps_tfo_cookie_req;
	u_int32_t	tcps_tfo_cookie_rcv;
	u_int32_t	tcps_tfo_syn_data_sent;
	u_int32_t	tcps_tfo_syn_data_acked;
	u_int32_t	tcps_tfo_syn_loss;
	u_int32_t	tcps_tfo_blackhole;
};

/* Returns true if the timer is on the timer list */
#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)

/* Run the TCP timerlist at least once every hour */
#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)

static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
static boolean_t tcp_garbage_collect(struct inpcb *, int);

/*
 * Add to tcp timewait list, delay is given in milliseconds.
 */
static void
add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp = tp->t_inpcb;
	uint32_t timer;

	/* pcb list should be locked when we get here */
	lck_rw_assert(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

	/* We may get here multiple times, so check */
	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
		pcbinfo->ipi_twcount++;
		inp->inp_flags2 |= INP2_TIMEWAIT;

		/* Remove from global inp list */
		LIST_REMOVE(inp, inp_list);
	} else {
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
	}

	/* Compute the time at which this socket can be closed */
	timer = tcp_now + delay;

	/* We will use the TCPT_2MSL timer for tracking this delay */

	if (TIMER_IS_ON_LIST(tp))
		tcp_remove_timer(tp);
	tp->t_timer[TCPT_2MSL] = timer;

	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
}
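
/*
 * Usage sketch (assumption, not from the original source): the delay is
 * given in TCP_RETRANSHZ ticks, which at 1000 ticks/sec equal
 * milliseconds, so a caller entering TIME_WAIT would typically look
 * like add_to_time_wait(tp, 2 * tcp_msl), letting the TCPT_2MSL slot
 * count down the full 2*MSL quiet period.
 */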

void
add_to_time_wait(struct tcpcb *tp, uint32_t delay)
{
	struct inpcbinfo *pcbinfo = &tcbinfo;
	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP)
		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);

	/* 19182803: Notify nstat that connection is closing before waiting. */
	nstat_pcb_detach(tp->t_inpcb);

	if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
		tcp_unlock(tp->t_inpcb->inp_socket, 0, 0);
		lck_rw_lock_exclusive(pcbinfo->ipi_lock);
		tcp_lock(tp->t_inpcb->inp_socket, 0, 0);
	}
	add_to_time_wait_locked(tp, delay);
	lck_rw_done(pcbinfo->ipi_lock);

	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
}

/* If this is on time wait queue, remove it. */
void
tcp_remove_from_time_wait(struct inpcb *inp)
{
	struct tcpcb *tp = intotcpcb(inp);
	if (inp->inp_flags2 & INP2_TIMEWAIT)
		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
}

static boolean_t
tcp_garbage_collect(struct inpcb *inp, int istimewait)
{
	boolean_t active = FALSE;
	struct socket *so;
	struct tcpcb *tp;

	so = inp->inp_socket;
	tp = intotcpcb(inp);

	/*
	 * Skip if still in use or busy; it would have been more efficient
	 * if we were to test so_usecount against 0, but this isn't possible
	 * due to the current implementation of tcp_dropdropablreq() where
	 * overflow sockets that are eligible for garbage collection have
	 * their usecounts set to 1.
	 */
	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx))
		return (TRUE);

	/* Check again under the lock */
	if (so->so_usecount > 1) {
		if (inp->inp_wantcnt == WNT_STOPUSING)
			active = TRUE;
		lck_mtx_unlock(&inp->inpcb_mtx);
		return (active);
	}

	if (istimewait &&
	    TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
	    tp->t_state != TCPS_CLOSED) {
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);
		tcp_close(tp);
	}

	/*
	 * Overflowed socket dropped from the listening queue? Do this
	 * only if we are called to clean up the time wait slots, since
	 * tcp_dropdropablreq() considers a socket to have been fully
	 * dropped after add_to_time_wait() is finished.
	 * Also handle the case of connections getting closed by the peer
	 * while in the queue, as seen with rdar://6422317
	 */
	if (so->so_usecount == 1 &&
	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
	    (so->so_head != NULL) &&
	    ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
	    (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {

		if (inp->inp_state != INPCB_STATE_DEAD) {
			/* Become a regular mutex */
			lck_mtx_convert_spin(&inp->inpcb_mtx);
#if INET6
			if (SOCK_CHECK_DOM(so, PF_INET6))
				in6_pcbdetach(inp);
			else
#endif /* INET6 */
				in_pcbdetach(inp);
		}
		so->so_usecount--;
		if (inp->inp_wantcnt == WNT_STOPUSING)
			active = TRUE;
		lck_mtx_unlock(&inp->inpcb_mtx);
		return (active);
	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
		lck_mtx_unlock(&inp->inpcb_mtx);
		return (FALSE);
	}

	/*
	 * We get here because the PCB is no longer searchable
	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
	 * (usecount is 0). This covers all cases, including overflow
	 * sockets and those that are considered as "embryonic",
	 * i.e. created by sonewconn() in TCP input path, and have
	 * not yet been committed. For the former, we reduce the usecount
	 * to 0 as done by the code above. For the latter, the usecount
	 * would have reduced to 0 as part of calling soabort() when the
	 * socket is dropped at the end of tcp_input().
	 */
	if (so->so_usecount == 0) {
		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
		/* Become a regular mutex */
		lck_mtx_convert_spin(&inp->inpcb_mtx);

		/*
		 * If this tp still happens to be on the timer list,
		 * take it out
		 */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
			if (SOCK_CHECK_DOM(so, PF_INET6))
				in6_pcbdetach(inp);
			else
#endif /* INET6 */
				in_pcbdetach(inp);
		}
		in_pcbdispose(inp);
		return (FALSE);
	}

	lck_mtx_unlock(&inp->inpcb_mtx);
	return (TRUE);
}

/*
 * TCP garbage collector callback (inpcb_timer_func_t).
 *
 * Counts the pcbs that will need to be gc-ed soon; posting a
 * count > 0 keeps the timer active.
 */
void
tcp_gc(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;
	struct tcpcb *tw_tp, *tw_ntp;
#if TCPDEBUG
	int ostate;
#endif
#if KDEBUG
	static int tws_checked = 0;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * Update tcp_now here as it may get used while
	 * processing the slow timer.
	 */
	calculate_tcp_clock();

	/*
	 * Garbage collect socket/tcpcb: We need to acquire the list lock
	 * exclusively to do this
	 */

	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
		/* don't sweat it this time; cleanup was done last time */
		if (tcp_gc_done == TRUE) {
			tcp_gc_done = FALSE;
			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
			    tws_checked, cur_tw_slot, 0, 0, 0);
			/* Lock upgrade failed, give up this round */
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
			return;
		}
		/* Upgrade failed; we lost the lock, so take it exclusively */
		lck_rw_lock_exclusive(ipi->ipi_lock);
	}
	tcp_gc_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		if (tcp_garbage_collect(inp, 0))
			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
	}

	/* Now cleanup the time wait ones */
	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
		/*
		 * We check the timestamp here without holding the
		 * socket lock for better performance. If there are
		 * any pcbs in time-wait, the timer will get rescheduled.
		 * Hence some error in this check can be tolerated.
		 *
		 * Sometimes a socket on time-wait queue can be closed if
		 * 2MSL timer expired but the application still has a
		 * usecount on it.
		 */
		if (tw_tp->t_state == TCPS_CLOSED ||
		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
			if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
				atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
		}
	}

	/* take into account pcbs that are still in time_wait_slots */
	atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);

	lck_rw_done(ipi->ipi_lock);

	/* Clean up the socache while we are here */
	if (so_cache_timer())
		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
	    cur_tw_slot, 0, 0, 0);

	return;
}

/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
	int i;

	tcp_remove_timer(tp);
	for (i = 0; i < TCPT_NTIMERS; i++)
		tp->t_timer[i] = 0;
	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = TCPT_NONE;
}

int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
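
/*
 * Sanity check (illustrative, not part of the original source): with
 * TCP_MAXRXTSHIFT == 12, tcp_backoff[] has 13 entries summing to
 * (1 + 2 + 4 + 8 + 16 + 32) + 7 * 64 = 63 + 448 = 511, which matches
 * tcp_totbackoff. The persist timer multiplies this sum by the current
 * REXMT value to bound the total zero-window-probe episode.
 */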

void
tcp_rexmt_save_state(struct tcpcb *tp)
{
	u_int32_t fsize;
	if (TSTMP_SUPPORTED(tp)) {
		/*
		 * Since timestamps are supported on the connection,
		 * we can do recovery as described in RFC 4015.
		 */
		fsize = tp->snd_max - tp->snd_una;
		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
		tp->snd_recover_prev = tp->snd_recover;
	} else {
		/*
		 * Timestamp option is not supported on this connection.
		 * Record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight. See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
	}
	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
}

/*
 * Revert to the older segment size if there is an indication that PMTU
 * blackhole detection was not needed.
 */
void
tcp_pmtud_revert_segment_size(struct tcpcb *tp)
{
	int32_t optlen;

	VERIFY(tp->t_pmtud_saved_maxopd > 0);
	tp->t_flags |= TF_PMTUD;
	tp->t_flags &= ~TF_BLACKHOLE;
	optlen = tp->t_maxopd - tp->t_maxseg;
	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
	tp->t_maxseg = tp->t_maxopd - optlen;
	/*
	 * Reset the slow-start flight size as it
	 * may depend on the new MSS
	 */
	if (CC_ALGO(tp)->cwnd_init != NULL)
		CC_ALGO(tp)->cwnd_init(tp);
	tp->t_pmtud_start_ts = 0;
	tcpstat.tcps_pmtudbh_reverted++;
}

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(struct tcpcb *tp, int timer)
{
	int32_t rexmt, optlen = 0, idle_time = 0;
	struct socket *so;
	struct tcptemp *t_template;
#if TCPDEBUG
	int ostate;
#endif

#if INET6
	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */
	u_int64_t accsleep_ms;
	u_int32_t last_sleep_ms = 0;

	so = tp->t_inpcb->inp_socket;
	idle_time = tcp_now - tp->t_rcvtime;

	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off. If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
	 * delete connection control block.
	 * Otherwise (this case shouldn't happen) check again in a bit;
	 * we keep the socket in the main list in that case.
	 */
	case TCPT_2MSL:
		tcp_free_sackholes(tp);
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_state != TCPS_FIN_WAIT_2 &&
		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
		} else {
			tp = tcp_close(tp);
			return (tp);
		}
		break;

	/*
	 * Retransmission timer went off. Message has not
	 * been acked within retransmit interval. Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		accsleep_ms = mach_absolutetime_asleep / 1000000UL;
		if (accsleep_ms > tp->t_accsleep_ms)
			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
		/*
		 * Drop a connection in the retransmit timer
		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
		 *    times
		 * 2. If the time spent in this retransmission episode is
		 *    more than the time limit set with TCP_RXT_CONNDROPTIME
		 *    socket option
		 * 3. If TCP_RXT_FINDROP socket option was set and
		 *    we have already retransmitted the FIN 3 times without
		 *    receiving an ack
		 */
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
				tcpstat.tcps_rxtfindrop++;
			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
				tcpstat.tcps_drop_after_sleep++;
			} else {
				tcpstat.tcps_timeoutdrop++;
			}
			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
				if (TCP_ECN_ENABLED(tp)) {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_on.rxmit_drop);
				} else {
					INP_INC_IFNET_STAT(tp->t_inpcb,
					    ecn_off.rxmit_drop);
				}
			}
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			postevent(so, 0, EV_TIMEOUT);
			soevent(so,
			    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);

			break;
		}

		tcpstat.tcps_rexmttimeo++;
		tp->t_accsleep_ms = accsleep_ms;

		if (tp->t_rxtshift == 1 &&
		    tp->t_state == TCPS_ESTABLISHED) {
			/* Set the time at which retransmission started. */
			tp->t_rxtstart = tcp_now;

			/*
			 * if this is the first retransmit timeout, save
			 * the state so that we can recover if the timeout
			 * is spurious.
			 */
			tcp_rexmt_save_state(tp);
		}
#if MPTCP
		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
		    (tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
			mptcp_act_on_txfail(so);
		}
#endif /* MPTCP */

		if (tp->t_adaptive_wtimo > 0 &&
		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
		    TCPS_HAVEESTABLISHED(tp->t_state)) {
			/* Send an event to the application */
			soevent(so,
			    (SO_FILT_HINT_LOCKED|
			    SO_FILT_HINT_ADAPTIVE_WTIMO));
		}

		/*
		 * If this is a retransmit timeout after PTO, the PTO
		 * was not effective
		 */
		if (tp->t_flagsext & TF_SENT_TLPROBE) {
			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
			tcpstat.tcps_rto_after_pto++;
		}

		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
			/*
			 * Retransmit timer fired before entering recovery
			 * on a connection with packet re-ordering. This
			 * suggests that the reordering metrics computed
			 * are not accurate.
			 */
			tp->t_reorderwin = 0;
			tp->t_timer[TCPT_DELAYFR] = 0;
			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
		}

		if (tp->t_state == TCPS_SYN_RECEIVED)
			tcp_disable_tfo(tp);

		if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
		    tp->t_rxtshift > 2)) {
			/*
			 * For regular retransmissions, the first one is
			 * done as a tail-loss probe before the timer-based
			 * ones begin. Thus, rxtshift > 1 means we have
			 * already sent the segment a total of 3 times.
			 *
			 * If we are in SYN-SENT state, there is no tail-loss
			 * probe, so we have to let rxtshift go up to 3.
			 */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			sorwakeup(so);
			sowwakeup(so);
		}

		if (tp->t_state == TCPS_SYN_SENT) {
			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
			tp->t_stat.synrxtshift = tp->t_rxtshift;

			/* When retransmitting, disable TFO */
			if (tfo_enabled(tp)) {
				tp->t_flagsext &= ~TF_FASTOPEN;
				tp->t_tfo_flags |= TFO_F_SYN_LOSS;

				tp->t_tfo_stats |= TFO_S_SYN_LOSS;
				tcpstat.tcps_tfo_syn_loss++;
			}
		} else {
			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		}

		TCPT_RANGESET(tp->t_rxtcur, rexmt,
		    tp->t_rttmin, TCPTV_REXMTMAX,
		    TCP_ADD_REXMTSLOP(tp));
		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);

		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb))
			goto fc_output;

		tcp_free_sackholes(tp);
		/*
		 * Check for potential Path MTU Discovery Black Hole
		 */
		if (tcp_pmtud_black_hole_detect &&
		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
		    (tp->t_state == TCPS_ESTABLISHED)) {
			if ((tp->t_flags & TF_PMTUD) &&
			    ((tp->t_flags & TF_MAXSEGSNT) ||
			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
			    tp->t_rxtshift == 2) {
				/*
				 * Enter Path MTU Black-hole Detection mechanism:
				 * - Disable Path MTU Discovery (IP "DF" bit).
				 * - Reduce MTU to lower value than what we
				 *   negotiated with the peer.
				 */
				/* Disable Path MTU Discovery for now */
				tp->t_flags &= ~TF_PMTUD;
				/* Record that we may have found a black hole */
				tp->t_flags |= TF_BLACKHOLE;
				optlen = tp->t_maxopd - tp->t_maxseg;
				/* Keep track of previous MSS */
				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
				tp->t_pmtud_start_ts = tcp_now;
				if (tp->t_pmtud_start_ts == 0)
					tp->t_pmtud_start_ts++;
				/* Reduce the MSS to intermediary value */
				if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
					tp->t_maxopd = tcp_pmtud_black_hole_mss;
				} else {
					tp->t_maxopd = /* use the default MSS */
#if INET6
					    isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
					    tcp_mssdflt;
				}
				tp->t_maxseg = tp->t_maxopd - optlen;

				/*
				 * Reset the slow-start flight size
				 * as it may depend on the new MSS
				 */
				if (CC_ALGO(tp)->cwnd_init != NULL)
					CC_ALGO(tp)->cwnd_init(tp);
			} else if ((tp->t_flags & TF_BLACKHOLE) &&
			    (tp->t_rxtshift > 4)) {
				/*
				 * If further retransmissions are still
				 * unsuccessful with a lowered MTU, maybe this
				 * isn't a Black Hole and we restore the
				 * previous MSS and blackhole detection flags.
				 */
				tcp_pmtud_revert_segment_size(tp);
			}
		}

		/*
		 * Disable rfc1323 and rfc1644 if we haven't got any
		 * response to our SYN (after we reach the threshold)
		 * to work-around some broken terminal servers (most of
		 * which have hopefully been retired) that have bad VJ
		 * header compression code which trashes TCP segments
		 * containing unknown-to-them TCP options.
		 * Do this only on non-local connections.
		 */
		if (tp->t_state == TCPS_SYN_SENT &&
		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
			tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);

		/*
		 * If losing, let the lower level know and try for
		 * a better route. Also, if we backed off this far,
		 * our srtt estimate is probably bogus. Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
			if (isipv6)
				in6_losing(tp->t_inpcb);
			else
#endif /* INET6 */
				in_losing(tp->t_inpcb);
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * Note: We overload snd_recover to function also as the
		 * snd_last variable described in RFC 2582
		 */
		tp->snd_recover = tp->snd_max;
		/*
		 * Force a segment to be sent.
		 */
		tp->t_flags |= TF_ACKNOW;

		/* If timing a segment in this window, stop the timer */
		tp->t_rtttime = 0;

		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1)
			tcpstat.tcps_tailloss_rto++;

		/*
		 * RFC 5681 says: when a TCP sender detects segment loss
		 * using retransmit timer and the given segment has already
		 * been retransmitted by way of the retransmission timer at
		 * least once, the value of ssthresh is held constant
		 */
		if (tp->t_rxtshift == 1 &&
		    CC_ALGO(tp)->after_timeout != NULL) {
			CC_ALGO(tp)->after_timeout(tp);
			/*
			 * CWR notifications are to be sent on new data
			 * right after Fast Retransmits and ECE
			 * notification receipts.
			 */
			if (TCP_ECN_ENABLED(tp))
				tp->ecn_flags |= TE_SENDCWR;
		}

		EXIT_FASTRECOVERY(tp);

		/* Exit cwnd non validated phase */
		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;

	fc_output:
		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);

		(void) tcp_output(tp);
		break;

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed. After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 *
		 * Drop the connection if we reached the maximum allowed time
		 * for Zero Window Probes without a non-zero update from the
		 * peer. See rdar://5805356
		 */
		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (idle_time >= tcp_maxpersistidle ||
		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
		    ((tp->t_persist_stop != 0) &&
		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
			tcpstat.tcps_persistdrop++;
			postevent(so, 0, EV_TIMEOUT);
			soevent(so,
			    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
			tp = tcp_drop(tp, ETIMEDOUT);
			break;
		}
		tcp_setpersist(tp);
		tp->t_flagsext |= TF_FORCE;
		(void) tcp_output(tp);
		tp->t_flagsext &= ~TF_FORCE;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		tcpstat.tcps_keeptimeo++;
#if MPTCP
		/*
		 * Regular TCP connections do not send keepalives after
		 * closing; MPTCP must not do so either once it has sent
		 * its Data FINs.
		 */
		struct mptcb *mp_tp = tp->t_mptcb;
		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
		    (tp->t_state > TCPS_ESTABLISHED)) {
			goto dropit;
		} else if (mp_tp != NULL) {
			if ((mptcp_ok_to_keepalive(mp_tp) == 0))
				goto dropit;
		}
#endif /* MPTCP */
		if (tp->t_state < TCPS_ESTABLISHED)
			goto dropit;
		if ((always_keepalive ||
		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp))
				goto dropit;
			/*
			 * Send a packet designed to force a response
			 * if the peer is up and reachable:
			 * either an ACK if the connection is still alive,
			 * or an RST if the peer has closed the connection
			 * due to timeout or reboot.
			 * Using sequence number tp->snd_una-1
			 * causes the transmitted zero-length segment
			 * to lie outside the receive window;
			 * by the protocol spec, this requires the
			 * correspondent TCP to respond.
			 */
			tcpstat.tcps_keepprobe++;
			t_template = tcp_maketemplate(tp);
			if (t_template) {
				struct inpcb *inp = tp->t_inpcb;
				struct tcp_respond_args tra;

				bzero(&tra, sizeof(tra));
				tra.nocell = INP_NO_CELLULAR(inp);
				tra.noexpensive = INP_NO_EXPENSIVE(inp);
				tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
				if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
					tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
				else
					tra.ifscope = IFSCOPE_NONE;
				tcp_respond(tp, t_template->tt_ipgen,
				    &t_template->tt_t, (struct mbuf *)NULL,
				    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
				(void) m_free(dtom(t_template));
				if (tp->t_flagsext & TF_DETECT_READSTALL)
					tp->t_rtimo_probes++;
			}
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPINTVL(tp));
		} else {
			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			    TCP_CONN_KEEPIDLE(tp));
		}
		if (tp->t_flagsext & TF_DETECT_READSTALL) {
			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
			bool reenable_probe = false;
			/*
			 * The keep alive packets sent to detect a read
			 * stall did not get a response from the
			 * peer. Generate more keep-alives to confirm this.
			 * If the number of probes sent reaches the limit,
			 * generate an event.
			 */
			if (tp->t_adaptive_rtimo > 0) {
				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
					/* Generate an event */
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_ADAPTIVE_RTIMO));
					tcp_keepalive_reset(tp);
				} else {
					reenable_probe = true;
				}
			} else if (outifp != NULL &&
			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
				reenable_probe = true;
			} else {
				tp->t_flagsext &= ~TF_DETECT_READSTALL;
			}
			if (reenable_probe) {
				int ind = min(tp->t_rtimo_probes,
				    TCP_MAXRXTSHIFT);
				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
			}
		}
		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
			int ind;

			tp->t_tfo_probes++;
			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);

			/*
			 * We take the minimum among the time set by true
			 * keepalive (see above) and the backoff'd RTO. That
			 * way we backoff in case of packet-loss but will never
			 * timeout slower than regular keepalive due to the
			 * backing off.
			 */
			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
			    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
			    tp->t_timer[TCPT_KEEP]);
		} else if (tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
			/* Still no data! Let's assume a TFO-error and err out... */
			tcp_heuristic_tfo_middlebox(tp);

			so->so_error = ENODATA;
			sorwakeup(so);
			tcpstat.tcps_tfo_blackhole++;
		}
		break;
	case TCPT_DELACK:
		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_timer[TCPT_DELACK] = 0;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * If the delayed ack timer fired while stretching
			 * acks, count the number of times the streaming
			 * detection was not correct. If this exceeds a
			 * threshold, disable stretch ack on this
			 * connection.
			 *
			 * Also, go back to acking every other packet.
			 */
			if ((tp->t_flags & TF_STRETCHACK)) {
				if (tp->t_unacksegs > 1 &&
				    tp->t_unacksegs < maxseg_unacked)
					tp->t_stretchack_delayed++;

				if (tp->t_stretchack_delayed >
				    TCP_STRETCHACK_DELAY_THRESHOLD) {
					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
					/*
					 * Note the time at which stretch
					 * ack was disabled automatically
					 */
					tp->rcv_nostrack_ts = tcp_now;
					tcpstat.tcps_nostretchack++;
					tp->t_stretchack_delayed = 0;
				}
				tcp_reset_stretch_ack(tp);
			}

			/*
			 * If we are measuring inter packet arrival jitter
			 * for throttling a connection, this delayed ack
			 * might be the reason for accumulating some
			 * jitter. So let's restart the measurement.
			 */
			CLEAR_IAJ_STATE(tp);

			tcpstat.tcps_delack++;
			(void) tcp_output(tp);
		}
		break;

#if MPTCP
	case TCPT_JACK_RXMT:
		if ((tp->t_state == TCPS_ESTABLISHED) &&
		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
				tcpstat.tcps_timeoutdrop++;
				postevent(so, 0, EV_TIMEOUT);
				soevent(so,
				    (SO_FILT_HINT_LOCKED|
				    SO_FILT_HINT_TIMEOUT));
				tp = tcp_drop(tp, tp->t_softerror ?
				    tp->t_softerror : ETIMEDOUT);
				break;
			}
			tcpstat.tcps_join_rxmts++;
			tp->t_flags |= TF_ACKNOW;

			/*
			 * No backoff is implemented for simplicity for this
			 * corner case.
			 */
			(void) tcp_output(tp);
		}
		break;
#endif /* MPTCP */

	case TCPT_PTO:
	{
		int32_t snd_len;
		tp->t_flagsext &= ~(TF_SENT_TLPROBE);

		/*
		 * Check if the connection is in the right state to
		 * send a probe
		 */
		if (tp->t_state != TCPS_ESTABLISHED ||
		    (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING)) ||
		    tp->snd_max == tp->snd_una ||
		    !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) ||
		    IN_FASTRECOVERY(tp))
			break;

		/*
		 * If there is no new data to send or if the
		 * connection is limited by receive window then
		 * retransmit the last segment, otherwise send
		 * new data.
		 */
		snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
		    - (tp->snd_max - tp->snd_una);
		if (snd_len > 0) {
			tp->snd_nxt = tp->snd_max;
		} else {
			snd_len = min((tp->snd_max - tp->snd_una),
			    tp->t_maxseg);
			tp->snd_nxt = tp->snd_max - snd_len;
		}

		tcpstat.tcps_pto++;
		if (tp->t_flagsext & TF_PROBING)
			tcpstat.tcps_probe_if++;

		/* If timing a segment in this window, stop the timer */
		tp->t_rtttime = 0;
		/* Note that tail loss probe is being sent */
		tp->t_flagsext |= TF_SENT_TLPROBE;
		tp->t_tlpstart = tcp_now;

		tp->snd_cwnd += tp->t_maxseg;
		(void) tcp_output(tp);
		tp->snd_cwnd -= tp->t_maxseg;

		tp->t_tlphighrxt = tp->snd_nxt;
		break;
	}
	case TCPT_DELAYFR:
		tp->t_flagsext &= ~TF_DELAY_RECOVERY;

		/*
		 * Don't do anything if one of the following is true:
		 * - the connection is already in recovery
		 * - the sequence space up to snd_recover has been
		 *   acknowledged
		 * - the retransmit timeout has fired
		 */
		if (IN_FASTRECOVERY(tp) ||
		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
		    tp->t_rxtshift > 0)
			break;

		VERIFY(SACK_ENABLED(tp));
		tcp_rexmt_save_state(tp);
		if (CC_ALGO(tp)->pre_fr != NULL) {
			CC_ALGO(tp)->pre_fr(tp);
			if (TCP_ECN_ENABLED(tp))
				tp->ecn_flags |= TE_SENDCWR;
		}
		ENTER_FASTRECOVERY(tp);

		tp->t_timer[TCPT_REXMT] = 0;
		tcpstat.tcps_sack_recovery_episode++;
		tp->t_sack_recovery_episode++;
		tp->sack_newdata = tp->snd_nxt;
		tp->snd_cwnd = tp->t_maxseg;
		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
		(void) tcp_output(tp);
		break;
	dropit:
		tcpstat.tcps_keepdrops++;
		postevent(so, 0, EV_TIMEOUT);
		soevent(so,
		    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
		tp = tcp_drop(tp, ETIMEDOUT);
		break;
	}
#if TCPDEBUG
	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	return (tp);
}

/* Remove a timer entry from timer list */
void
tcp_remove_timer(struct tcpcb *tp)
{
	struct tcptimerlist *listp = &tcp_timer_list;

	lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
	if (!(TIMER_IS_ON_LIST(tp))) {
		return;
	}
	lck_mtx_lock(listp->mtx);

	/* Check if pcb is on timer list again after acquiring the lock */
	if (!(TIMER_IS_ON_LIST(tp))) {
		lck_mtx_unlock(listp->mtx);
		return;
	}

	if (listp->next_te != NULL && listp->next_te == &tp->tentry)
		listp->next_te = LIST_NEXT(&tp->tentry, le);

	LIST_REMOVE(&tp->tentry, le);
	tp->t_flags &= ~(TF_TIMER_ONLIST);

	listp->entries--;

	tp->tentry.le.le_next = NULL;
	tp->tentry.le.le_prev = NULL;
	lck_mtx_unlock(listp->mtx);
}

/*
 * Function to check if the timerlist needs to be rescheduled to run
 * the timer entry correctly. Basically, this is to check if we can avoid
 * taking the list lock.
 */

static boolean_t
need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
{
	struct tcptimerlist *listp = &tcp_timer_list;
	int32_t diff;

	/*
	 * If the list is being processed then the state of the list is
	 * in flux. In this case always acquire the lock and set the state
	 * correctly.
	 */
	if (listp->running)
		return (TRUE);

	if (!listp->scheduled)
		return (TRUE);

	diff = timer_diff(listp->runtime, 0, runtime, 0);
	if (diff <= 0) {
		/* The list is going to run before this timer */
		return (FALSE);
	} else {
		if (mode & TCP_TIMERLIST_10MS_MODE) {
			if (diff <= TCP_TIMER_10MS_QUANTUM)
				return (FALSE);
		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
			if (diff <= TCP_TIMER_100MS_QUANTUM)
				return (FALSE);
		} else {
			if (diff <= TCP_TIMER_500MS_QUANTUM)
				return (FALSE);
		}
	}
	return (TRUE);
}
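
/*
 * Illustrative example (not part of the original source), assuming
 * TCP_TIMER_100MS_QUANTUM corresponds to 100 ms of ticks: if the list
 * is scheduled to run at tcp_now + 120 and a 100ms-mode timer wants to
 * fire at tcp_now + 50, diff is 70, within one quantum, so the pending
 * run is close enough and no reschedule is needed. Were the list
 * scheduled at tcp_now + 200, diff would be 150 and the list would be
 * rescheduled to honor the earlier timer.
 */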

void
tcp_sched_timerlist(uint32_t offset)
{
	uint64_t deadline = 0;
	struct tcptimerlist *listp = &tcp_timer_list;

	lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED);

	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
	listp->runtime = tcp_now + offset;
	if (listp->runtime == 0) {
		listp->runtime++;
		offset++;
	}

	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);

	thread_call_enter_delayed(listp->call, deadline);
	listp->scheduled = TRUE;
}

/*
 * Function to run the timers for a connection.
 *
 * Returns the offset of next timer to be run for this connection which
 * can be used to reschedule the timerlist.
 *
 * te_mode is an out parameter that indicates the modes of active
 * timers for this connection.
 */
u_int32_t
tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
    u_int16_t probe_if_index)
{
	struct socket *so;
	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
	u_int32_t timer_val, offset = 0, lo_timer = 0;
	int32_t diff;
	boolean_t needtorun[TCPT_NTIMERS];
	int count = 0;

	VERIFY(tp != NULL);
	bzero(needtorun, sizeof(needtorun));
	*te_mode = 0;

	tcp_lock(tp->t_inpcb->inp_socket, 1, 0);

	so = tp->t_inpcb->inp_socket;
	/* Release the want count on inp */
	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
	    == WNT_STOPUSING) {
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		/*
		 * Looks like the TCP connection got closed while we
		 * were waiting for the lock. Done.
		 */
		goto done;
	}

	/*
	 * If this connection is over an interface that needs to
	 * be probed, send probe packets to reinitiate communication.
	 */
	if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
	    tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
		tp->t_flagsext |= TF_PROBING;
		tcp_timers(tp, TCPT_PTO);
		tp->t_timer[TCPT_PTO] = 0;
		/* Clear the probing flag now that the probe has been sent */
		tp->t_flagsext &= ~TF_PROBING;
	}

	/*
	 * Since the timer thread needs to wait for tcp lock, it may race
	 * with another thread that can cancel or reschedule the timer
	 * that is about to run. Check if we need to run anything.
	 */
	if ((index = tp->tentry.index) == TCPT_NONE)
		goto done;

	timer_val = tp->t_timer[index];

	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
	if (diff > 0) {
		if (tp->tentry.index != TCPT_NONE) {
			offset = diff;
			*(te_mode) = tp->tentry.mode;
		}
		goto done;
	}

	tp->t_timer[index] = 0;
	if (timer_val > 0) {
		tp = tcp_timers(tp, index);
		if (tp == NULL)
			goto done;
	}

	/*
	 * Check if there are any other timers that need to be run.
	 * While doing it, adjust the timer values wrt tcp_now.
	 */
	tp->tentry.mode = 0;
	for (i = 0; i < TCPT_NTIMERS; ++i) {
		if (tp->t_timer[i] != 0) {
			diff = timer_diff(tp->tentry.timer_start,
			    tp->t_timer[i], tcp_now, 0);
			if (diff <= 0) {
				needtorun[i] = TRUE;
				count++;
			} else {
				tp->t_timer[i] = diff;
				needtorun[i] = FALSE;
				if (lo_timer == 0 || diff < lo_timer) {
					lo_timer = diff;
					lo_index = i;
				}
				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
			}
		}
	}

	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = lo_index;
	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);

	if (tp->tentry.index != TCPT_NONE) {
		tp->tentry.runtime = tp->tentry.timer_start +
		    tp->t_timer[tp->tentry.index];
		if (tp->tentry.runtime == 0)
			tp->tentry.runtime++;
	}

	if (count > 0) {
		/* run any other timers outstanding at this time. */
		for (i = 0; i < TCPT_NTIMERS; ++i) {
			if (needtorun[i]) {
				tp->t_timer[i] = 0;
				tp = tcp_timers(tp, i);
				if (tp == NULL) {
					offset = 0;
					*(te_mode) = 0;
					goto done;
				}
			}
		}
		tcp_set_lotimer_index(tp);
	}

	if (tp->tentry.index < TCPT_NONE) {
		offset = tp->t_timer[tp->tentry.index];
		*(te_mode) = tp->tentry.mode;
	}

done:
	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
		tcp_remove_timer(tp);
		offset = 0;
	}

	tcp_unlock(so, 1, 0);
	return (offset);
}

void
tcp_run_timerlist(void *arg1, void *arg2)
{
#pragma unused(arg1, arg2)
	struct tcptimerentry *te, *next_te;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct tcpcb *tp;
	uint32_t next_timer = 0; /* offset of the next timer on the list */
	u_int16_t te_mode = 0;   /* modes of all active timers in a tcpcb */
	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
	uint32_t active_count = 0;

	calculate_tcp_clock();

	lck_mtx_lock(listp->mtx);

	listp->running = TRUE;

	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
		uint32_t offset = 0;
		uint32_t runtime = te->runtime;
		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
			offset = timer_diff(runtime, 0, tcp_now, 0);
			if (next_timer == 0 || offset < next_timer) {
				next_timer = offset;
			}
			list_mode |= te->mode;
			continue;
		}

		tp = TIMERENTRY_TO_TP(te);

		/*
		 * Acquire an inp wantcnt on the inpcb so that the socket
		 * won't get detached even if tcp_close is called
		 */
		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
		    == WNT_STOPUSING) {
			/*
			 * Somehow this pcb went into dead state while
			 * on the timer list; just take it off the list.
			 * Since the timer list entry pointers are
			 * protected by the timer list lock, we can
			 * do it here without the socket lock.
			 */
			if (TIMER_IS_ON_LIST(tp)) {
				tp->t_flags &= ~(TF_TIMER_ONLIST);
				LIST_REMOVE(&tp->tentry, le);
				listp->entries--;

				tp->tentry.le.le_next = NULL;
				tp->tentry.le.le_prev = NULL;
			}
			continue;
		}
		active_count++;

		/*
		 * Store the next timerentry pointer before releasing the
		 * list lock. If that entry has to be removed when we
		 * release the lock, this pointer will be updated to the
		 * element after that.
		 */
		listp->next_te = next_te;

		VERIFY_NEXT_LINK(&tp->tentry, le);
		VERIFY_PREV_LINK(&tp->tentry, le);

		lck_mtx_unlock(listp->mtx);

		offset = tcp_run_conn_timer(tp, &te_mode,
		    listp->probe_if_index);

		lck_mtx_lock(listp->mtx);

		next_te = listp->next_te;
		listp->next_te = NULL;

		if (offset > 0 && te_mode != 0) {
			list_mode |= te_mode;

			if (next_timer == 0 || offset < next_timer)
				next_timer = offset;
		}
	}

	if (!LIST_EMPTY(&listp->lhead)) {
		u_int16_t next_mode = 0;
		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
			next_mode = TCP_TIMERLIST_10MS_MODE;
		else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
			next_mode = TCP_TIMERLIST_100MS_MODE;
		else
			next_mode = TCP_TIMERLIST_500MS_MODE;

		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
			listp->idleruns = 0;
		} else {
			/*
			 * The next required mode is slow mode, but if
			 * the last one was a faster mode and we did not
			 * have enough idle runs, repeat the last mode.
			 *
			 * We try to keep the timer list in fast mode for
			 * some idle time in expectation of new data.
			 */
			if (listp->mode != next_mode &&
			    listp->idleruns < timer_fastmode_idlemax) {
				listp->idleruns++;
				next_mode = listp->mode;
				next_timer = TCP_TIMER_100MS_QUANTUM;
			} else {
				listp->idleruns = 0;
			}
		}
		listp->mode = next_mode;
		if (listp->pref_offset != 0)
			next_timer = min(listp->pref_offset, next_timer);

		if (listp->mode == TCP_TIMERLIST_500MS_MODE)
			next_timer = max(next_timer,
			    TCP_TIMER_500MS_QUANTUM);

		tcp_sched_timerlist(next_timer);
	} else {
		/*
		 * No need to reschedule this timer soon, but always keep
		 * it running periodically, at a much coarser granularity.
		 */
		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
	}

	listp->running = FALSE;
	listp->pref_mode = 0;
	listp->pref_offset = 0;
	listp->probe_if_index = 0;

	lck_mtx_unlock(listp->mtx);
}
1756
1757 /*
1758 * Function to check if the timerlist needs to be rescheduled to run this
1759 * connection's timers correctly.
1760 */
1761 void
1762 tcp_sched_timers(struct tcpcb *tp)
1763 {
1764 struct tcptimerentry *te = &tp->tentry;
1765 u_int16_t index = te->index;
1766 u_int16_t mode = te->mode;
1767 struct tcptimerlist *listp = &tcp_timer_list;
1768 int32_t offset = 0;
1769 boolean_t list_locked = FALSE;
1770
1771 if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
1772 /* Just return without adding the dead pcb to the list */
1773 if (TIMER_IS_ON_LIST(tp)) {
1774 tcp_remove_timer(tp);
1775 }
1776 return;
1777 }
1778
1779 if (index == TCPT_NONE) {
1780 /* Nothing to run */
1781 tcp_remove_timer(tp);
1782 return;
1783 }
1784
1785 /*
1786 * compute the offset at which the next timer for this connection
1787 * has to run.
1788 */
1789 offset = timer_diff(te->runtime, 0, tcp_now, 0);
1790 if (offset <= 0) {
1791 offset = 1;
1792 tcp_timer_advanced++;
1793 }
1794
1795 if (!TIMER_IS_ON_LIST(tp)) {
1796 if (!list_locked) {
1797 lck_mtx_lock(listp->mtx);
1798 list_locked = TRUE;
1799 }
1800
1801 LIST_INSERT_HEAD(&listp->lhead, te, le);
1802 tp->t_flags |= TF_TIMER_ONLIST;
1803
1804 listp->entries++;
1805 if (listp->entries > listp->maxentries)
1806 listp->maxentries = listp->entries;
1807
1808 /* if the list is not scheduled, just schedule it */
1809 if (!listp->scheduled)
1810 goto schedule;
1811 }
1812
1813
1814 /*
1815 * Timer entry is currently on the list, check if the list needs
1816 * to be rescheduled.
1817 */
1818 if (need_to_resched_timerlist(te->runtime, mode)) {
1819 tcp_resched_timerlist++;
1820
1821 if (!list_locked) {
1822 lck_mtx_lock(listp->mtx);
1823 list_locked = TRUE;
1824 }
1825
1826 VERIFY_NEXT_LINK(te, le);
1827 VERIFY_PREV_LINK(te, le);
1828
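/*
 * If the timer list is in the middle of a run, do not
 * reschedule it from here; record the preferred mode and the
 * smallest preferred offset instead, and let the run loop fold
 * them in when it reschedules the list.
 */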
1829 if (listp->running) {
1830 listp->pref_mode |= mode;
1831 if (listp->pref_offset == 0 ||
1832 offset < listp->pref_offset) {
1833 listp->pref_offset = offset;
1834 }
1835 } else {
1836 /*
1837 * The list could have got rescheduled while
1838 * this thread was waiting for the lock
1839 */
1840 if (listp->scheduled) {
1841 int32_t diff;
1842 diff = timer_diff(listp->runtime, 0,
1843 tcp_now, offset);
1844 if (diff <= 0)
1845 goto done;
1846 else
1847 goto schedule;
1848 } else {
1849 goto schedule;
1850 }
1851 }
1852 }
1853 goto done;
1854
1855 schedule:
1856 /*
1857 * Since a connection with timers is getting scheduled, the timer
1858 * list moves from idle to active state, which is why idleruns is
1859 * reset below.
1860 */
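/*
 * Note: the mode handling below assumes the TCP_TIMERLIST_*_MODE
 * values are ordered so that a numerically larger mode is a
 * slower one.
 */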
1861 if (mode & TCP_TIMERLIST_10MS_MODE) {
1862 listp->mode = TCP_TIMERLIST_10MS_MODE;
1863 listp->idleruns = 0;
1864 offset = min(offset, TCP_TIMER_10MS_QUANTUM);
1865 } else if (mode & TCP_TIMERLIST_100MS_MODE) {
1866 if (listp->mode > TCP_TIMERLIST_100MS_MODE)
1867 listp->mode = TCP_TIMERLIST_100MS_MODE;
1868 listp->idleruns = 0;
1869 offset = min(offset, TCP_TIMER_100MS_QUANTUM);
1870 }
1871 tcp_sched_timerlist(offset);
1872
1873 done:
1874 if (list_locked)
1875 lck_mtx_unlock(listp->mtx);
1876
1877 return;
1878 }
1879
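/*
 * Scan all of this connection's timers for the one that fires
 * earliest, record its index together with a bitmask of the
 * modes required, and compute the absolute runtime. A runtime
 * of 0 is bumped to 1, presumably so that 0 can keep serving
 * as an "unset" value.
 */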
1880 static inline void
1881 tcp_set_lotimer_index(struct tcpcb *tp)
1882 {
1883 uint16_t i, lo_index = TCPT_NONE, mode = 0;
1884 uint32_t lo_timer = 0;
1885 for (i = 0; i < TCPT_NTIMERS; ++i) {
1886 if (tp->t_timer[i] != 0) {
1887 TCP_SET_TIMER_MODE(mode, i);
1888 if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
1889 lo_timer = tp->t_timer[i];
1890 lo_index = i;
1891 }
1892 }
1893 }
1894 tp->tentry.index = lo_index;
1895 tp->tentry.mode = mode;
1896 VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
1897
1898 if (tp->tentry.index != TCPT_NONE) {
1899 tp->tentry.runtime = tp->tentry.timer_start
1900 + tp->t_timer[tp->tentry.index];
1901 if (tp->tentry.runtime == 0)
1902 tp->tentry.runtime++;
1903 }
1904 }
1905
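/*
 * Recompute the earliest timer for this connection and make
 * sure the timer list is scheduled to honor it. Time-wait
 * connections are skipped here, as their teardown is driven
 * separately.
 */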
1906 void
1907 tcp_check_timer_state(struct tcpcb *tp)
1908 {
1909 lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1910
1911 if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
1912 return;
1913
1914 tcp_set_lotimer_index(tp);
1915
1916 tcp_sched_timers(tp);
1917 return;
1918 }
1919
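/*
 * Report the change in a counter since the previous report.
 * The unsigned subtraction followed by the signed cast keeps
 * small deltas correct across a 32-bit wrap: for example,
 * prev = 0xfffffff0 and cur = 0x10 yield (int32_t)0x20 = 32.
 * A negative result (a reset or an implausibly large jump) is
 * reported as 0.
 */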
1920 static inline void
1921 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
1922 {
1923 /* handle wrap around */
1924 int32_t diff = (int32_t) (cur - *prev);
1925 if (diff > 0)
1926 *dest = diff;
1927 else
1928 *dest = 0;
1929 *prev = cur;
1930 return;
1931 }
1932
1933 __private_extern__ void
1934 tcp_report_stats(void)
1935 {
1936 struct nstat_sysinfo_data data;
1937 struct sockaddr_in dst;
1938 struct sockaddr_in6 dst6;
1939 struct rtentry *rt = NULL;
1940 static struct tcp_last_report_stats prev;
1941 u_int64_t var, uptime;
1942
1943 #define stat data.u.tcp_stats
1944 if (((uptime = net_uptime()) - tcp_last_report_time) <
1945 tcp_report_stats_interval)
1946 return;
1947
1948 tcp_last_report_time = uptime;
1949
1950 bzero(&data, sizeof(data));
1951 data.flags = NSTAT_SYSINFO_TCP_STATS;
1952
1953 bzero(&dst, sizeof(dst));
1954 dst.sin_len = sizeof(dst);
1955 dst.sin_family = AF_INET;
1956
1957 /* ipv4 avg rtt */
1958 lck_mtx_lock(rnh_lock);
1959 rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
1960 rt_tables[AF_INET], IFSCOPE_NONE);
1961 lck_mtx_unlock(rnh_lock);
1962 if (rt != NULL) {
1963 RT_LOCK(rt);
1964 if (rt_primary_default(rt, rt_key(rt)) &&
1965 rt->rt_stats != NULL) {
1966 stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
1967 }
1968 RT_UNLOCK(rt);
1969 rtfree(rt);
1970 rt = NULL;
1971 }
1972
1973 /* ipv6 avg rtt */
1974 bzero(&dst6, sizeof(dst6));
1975 dst6.sin6_len = sizeof(dst6);
1976 dst6.sin6_family = AF_INET6;
1977
1978 lck_mtx_lock(rnh_lock);
1979 rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
1980 rt_tables[AF_INET6], IFSCOPE_NONE);
1981 lck_mtx_unlock(rnh_lock);
1982 if (rt != NULL) {
1983 RT_LOCK(rt);
1984 if (rt_primary_default(rt, rt_key(rt)) &&
1985 rt->rt_stats != NULL) {
1986 stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
1987 }
1988 RT_UNLOCK(rt);
1989 rtfree(rt);
1990 rt = NULL;
1991 }
1992
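/*
 * The rates below are percentages in fixed point, scaled by
 * 2^10. For example, 500 retransmits out of 100000 sent
 * packets gives ((500 << 10) * 100) / 100000 = 512, i.e.
 * 0.5% scaled by 1024.
 */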
1993 /* send packet loss rate, shift by 10 for precision */
1994 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
1995 var = tcpstat.tcps_sndrexmitpack << 10;
1996 stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
1997 }
1998
1999 /* recv packet loss rate, shift by 10 for precision */
2000 if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2001 var = tcpstat.tcps_recovered_pkts << 10;
2002 stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
2003 }
2004
2005 /* RTO after tail loss, shift by 10 for precision */
2006 if (tcpstat.tcps_sndrexmitpack > 0
2007 && tcpstat.tcps_tailloss_rto > 0) {
2008 var = tcpstat.tcps_tailloss_rto << 10;
2009 stat.send_tlrto_rate =
2010 (var * 100) / tcpstat.tcps_sndrexmitpack;
2011 }
2012
2013 /* packet reordering */
2014 if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2015 var = tcpstat.tcps_reordered_pkts << 10;
2016 stat.send_reorder_rate =
2017 (var * 100) / tcpstat.tcps_sndpack;
2018 }
2019
2020 if (tcp_ecn_outbound == 1)
2021 stat.ecn_client_enabled = 1;
2022 if (tcp_ecn_inbound == 1)
2023 stat.ecn_server_enabled = 1;
2024 tcp_cumulative_stat(tcpstat.tcps_connattempt,
2025 &prev.tcps_connattempt, &stat.connection_attempts);
2026 tcp_cumulative_stat(tcpstat.tcps_accepts,
2027 &prev.tcps_accepts, &stat.connection_accepts);
2028 tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2029 &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2030 tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2031 &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2032 tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2033 &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2034 tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2035 &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2036 tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2037 &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2038 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2039 &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2040 tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2041 &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2042 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2043 &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2044 tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2045 &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2048 tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2049 &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2052 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2053 &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2054 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2055 &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2056 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2057 &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2058 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2059 &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2060 tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2061 &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2062 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2063 &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2064 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2065 &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2066 tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2067 &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2068 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2069 &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2070 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2071 &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2072 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2073 &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2074 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2075 &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2076 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2077 &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2078 tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2079 &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2080 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2081 &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2082 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2083 &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2084 tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2085 &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2086 tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2087 &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2088
2089 nstat_sysinfo_send_data(&data);
2090
2091 #undef stat
2092 }
2093
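/*
 * Ask the timer list to run within the next 10ms so probes can
 * be sent on connections going over the given interface. Only
 * one interface can be probed at a time; a conflicting request
 * is counted in tcps_probe_if_conflict and dropped.
 */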
2094 void
2095 tcp_interface_send_probe(u_int16_t probe_if_index)
2096 {
2097 int32_t offset = 0;
2098 struct tcptimerlist *listp = &tcp_timer_list;
2099
2100 /* Make sure TCP clock is up to date */
2101 calculate_tcp_clock();
2102
2103 lck_mtx_lock(listp->mtx);
2104 if (listp->probe_if_index > 0) {
2105 tcpstat.tcps_probe_if_conflict++;
2106 goto done;
2107 }
2108
2109 listp->probe_if_index = probe_if_index;
2110 if (listp->running)
2111 goto done;
2112
2113 /*
2114 * Reschedule the timerlist to run within the next 10ms, which is
2115 * the fastest that we can do.
2116 */
2117 offset = TCP_TIMER_10MS_QUANTUM;
2118 if (listp->scheduled) {
2119 int32_t diff;
2120 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2121 if (diff <= 0) {
2122 /* The timer will already fire sooner than needed */
2123 goto done;
2124 }
2125 }
2126 listp->mode = TCP_TIMERLIST_10MS_MODE;
2127 listp->idleruns = 0;
2128
2129 tcp_sched_timerlist(offset);
2130
2131 done:
2132 lck_mtx_unlock(listp->mtx);
2133 return;
2134 }
2135
2136 /*
2137 * Enable read probes on this connection, if:
2138 * - it is in established state
2139 * - doesn't have any data outstanding
2140 * - the outgoing ifp matches
2141 * - we have not already sent any read probes
2142 */
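/*
 * The probe reuses the keepalive machinery: TF_DETECT_READSTALL
 * marks the connection and TCPT_KEEP is armed at the 10ms
 * quantum, so the keepalive timer path can start sending read
 * probes almost immediately.
 */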
2143 static void
2144 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2145 {
2146 if (tp->t_state == TCPS_ESTABLISHED &&
2147 tp->snd_max == tp->snd_una &&
2148 tp->t_inpcb->inp_last_outifp == ifp &&
2149 !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2150 tp->t_rtimo_probes == 0) {
2151 tp->t_flagsext |= TF_DETECT_READSTALL;
2152 tp->t_rtimo_probes = 0;
2153 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2154 TCP_TIMER_10MS_QUANTUM);
2155 if (tp->tentry.index == TCPT_NONE) {
2156 tp->tentry.index = TCPT_KEEP;
2157 tp->tentry.runtime = tcp_now +
2158 TCP_TIMER_10MS_QUANTUM;
2159 } else {
2160 int32_t diff = 0;
2161
2162 /* Reset runtime to be in next 10ms */
2163 diff = timer_diff(tp->tentry.runtime, 0,
2164 tcp_now, TCP_TIMER_10MS_QUANTUM);
2165 if (diff > 0) {
2166 tp->tentry.index = TCPT_KEEP;
2167 tp->tentry.runtime = tcp_now +
2168 TCP_TIMER_10MS_QUANTUM;
2169 if (tp->tentry.runtime == 0)
2170 tp->tentry.runtime++;
2171 }
2172 }
2173 }
2174 }
2175
2176 /*
2177 * Disable read probe and reset the keep alive timer
2178 */
2179 static void
2180 tcp_disable_read_probe(struct tcpcb *tp)
2181 {
2182 if (tp->t_adaptive_rtimo == 0 &&
2183 ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2184 tp->t_rtimo_probes > 0)) {
2185 tcp_keepalive_reset(tp);
2186 }
2187 }
2188
2189 /*
2190 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2191 * probes on connections going over a particular interface.
2192 */
2193 void
2194 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2195 {
2196 int32_t offset;
2197 struct tcptimerlist *listp = &tcp_timer_list;
2198 struct inpcbinfo *pcbinfo = &tcbinfo;
2199 struct inpcb *inp, *nxt;
2200
2201 if (ifp == NULL)
2202 return;
2203
2204 /* update clock */
2205 calculate_tcp_clock();
2206
2207 /*
2208 * Enable keep alive timer on all connections that are
2209 * active/established on this interface.
2210 */
2211 lck_rw_lock_shared(pcbinfo->ipi_lock);
2212
2213 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2214 struct tcpcb *tp = NULL;
2215 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2216 WNT_STOPUSING)
2217 continue;
2218
2219 /* Acquire lock to look at the state of the connection */
2220 tcp_lock(inp->inp_socket, 1, 0);
2221
2222 /* Release the want count */
2223 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2224 tcp_unlock(inp->inp_socket, 1, 0);
2225 continue;
2226 }
2227
2228 tp = intotcpcb(inp);
2229 if (enable)
2230 tcp_enable_read_probe(tp, ifp);
2231 else
2232 tcp_disable_read_probe(tp);
2233
2234 tcp_unlock(inp->inp_socket, 1, 0);
2235 }
2236 lck_rw_done(pcbinfo->ipi_lock);
2237
2238 lck_mtx_lock(listp->mtx);
2239 if (listp->running) {
2240 listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2241 goto done;
2242 }
2243
2244 /* Reschedule within the next 10ms */
2245 offset = TCP_TIMER_10MS_QUANTUM;
2246 if (listp->scheduled) {
2247 int32_t diff;
2248 diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2249 if (diff <= 0) {
2250 /* The timer will already fire sooner than needed */
2251 goto done;
2252 }
2253 }
2254 listp->mode = TCP_TIMERLIST_10MS_MODE;
2255 listp->idleruns = 0;
2256
2257 tcp_sched_timerlist(offset);
2258 done:
2259 lck_mtx_unlock(listp->mtx);
2260 return;
2261 }
2262
2263 void
2264 tcp_itimer(struct inpcbinfo *ipi)
2265 {
2266 struct inpcb *inp, *nxt;
2267
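/*
 * Take the pcbinfo lock exclusively, but avoid stalling the
 * timer thread on contention: the first contended attempt
 * requests a fast rerun and returns; if that rerun is contended
 * again (tcp_itimer_done is still FALSE), block for the lock so
 * the walk is guaranteed to make progress.
 */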
2268 if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
2269 if (tcp_itimer_done == TRUE) {
2270 tcp_itimer_done = FALSE;
2271 atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
2272 return;
2273 }
2274 /* Already deferred once; block until the lock can be taken */
2275 lck_rw_lock_exclusive(ipi->ipi_lock);
2276 }
2277 tcp_itimer_done = TRUE;
2278
2279 LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
2280 struct socket *so;
2281
2282 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
2283 continue;
2284 so = inp->inp_socket;
2285 tcp_lock(so, 1, 0);
2286 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2287 tcp_unlock(so, 1, 0);
2288 continue;
2289 }
2290 so_check_extended_bk_idle_time(so);
2291 tcp_unlock(so, 1, 0);
2292 }
2293
2294 lck_rw_done(ipi->ipi_lock);
2295 }
2296