/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mcache.h>
#include <sys/queue.h>
#include <kern/locks.h>
#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
#include <mach/boolean.h>

#include <net/route.h>
#include <net/if_var.h>
#include <net/ntstat.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet/tcp_log.h>

#include <sys/kdebug.h>
#include <mach/sdt.h>
#include <netinet/mptcp_var.h>
/* Max number of times a stretch ack can be delayed on a connection */
#define TCP_STRETCHACK_DELAY_THRESHOLD  5

/*
 * If the host processor has been sleeping for too long, this is the threshold
 * used to avoid sending stale retransmissions.
 */
#define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */
/* tcp timer list */
struct tcptimerlist tcp_timer_list;

/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
struct tcptailq tcp_tw_tailq;
static int
sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
    int error, temp;
    long s, tt;

    tt = *(int *)arg1;
    s = tt * 1000 / TCP_RETRANSHZ;
    if (tt < 0 || s > INT_MAX) {
        return EINVAL;
    }
    temp = (int)s;

    error = sysctl_handle_int(oidp, &temp, 0, req);
    if (error || !req->newptr) {
        return error;
    }

    tt = (long)temp * TCP_RETRANSHZ / 1000;
    if (tt < 1 || tt > INT_MAX) {
        return EINVAL;
    }

    *(int *)arg1 = (int)tt;
    SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
    return 0;
}
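
/*
 * [Annotation, not in the original source] The handler above converts
 * between milliseconds (the sysctl's external unit) and TCP_RETRANSHZ
 * ticks (the internal unit). With TCP_RETRANSHZ at 1000 the mapping is
 * 1:1; with a hypothetical 100-tick clock, writing 75 ms would store
 * 75 * 100 / 1000 = 7 ticks, which reads back as 7 * 1000 / 100 = 70 ms
 * because of integer truncation.
 */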
#if SYSCTL_SKMEM
int tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
    sysctl_msec_to_ticks, "I", "");

int tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
    sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
    sysctl_msec_to_ticks, "I", "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");

int tcp_msl = TCPTV_MSL;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
    sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#else /* SYSCTL_SKMEM */
int tcp_keepinit = TCPTV_KEEP_INIT;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepidle = TCPTV_KEEP_IDLE;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepintvl = TCPTV_KEEPINTVL;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");

int tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_keepcnt, 0, "number of times to repeat keepalive");

int tcp_msl = TCPTV_MSL;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
#endif /* SYSCTL_SKMEM */
/*
 * Avoid DoS via TCP Robustness in Persist Condition
 * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
 * by allowing a system wide maximum persistence timeout value when in
 * Zero Window Probe mode.
 *
 * Expressed in milliseconds to be consistent with other timeout-related
 * values; the TCP socket option is in seconds.
 */
#if SYSCTL_SKMEM
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
    sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
#else /* SYSCTL_SKMEM */
u_int32_t tcp_max_persist_timeout = 0;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
    "Maximum persistence timeout for ZWP");
#endif /* SYSCTL_SKMEM */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
    "Assume SO_KEEPALIVE on all TCP connections");
/*
 * This parameter determines how long the timer list will stay in fast or
 * quick mode even though all connections are idle. In this state, the
 * timer will run more frequently anticipating new data.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
    TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
/*
 * See tcp_syn_backoff[] for interval values between SYN retransmits;
 * the value set below defines the number of retransmits, before we
 * disable the timestamp and window scaling options during subsequent
 * SYN retransmits. Setting it to 0 disables the dropping off of those
 * two options.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
    10, "Number of retransmitted SYNs before disabling RFC 1323 "
    "options on local connections");
static int tcp_timer_advanced = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
    "Number of times one of the timers was advanced");

static int tcp_resched_timerlist = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
    "Number of times timer list was rescheduled as part of processing a packet");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
    "Path MTU Discovery Black Hole Detection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#if (DEBUG || DEVELOPMENT)
int tcp_probe_if_fix_port = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_probe_if_fix_port, 0, "");
#endif /* (DEBUG || DEVELOPMENT) */
static u_int32_t tcp_mss_rec_medium = 1200;
static u_int32_t tcp_mss_rec_low = 512;

#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */
int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;

/* performed garbage collection of "used" sockets */
static boolean_t tcp_gc_done = FALSE;

/* max idle probes */
int tcp_maxpersistidle = TCPTV_KEEP_IDLE;
/*
 * TCP delack timer is set to 100 ms. Since the processing of timer list
 * in fast mode will happen no faster than 100 ms, the delayed ack timer
 * will fire somewhere between 100 and 200 ms.
 */
int tcp_delack = TCP_RETRANSHZ / 10;

#if MPTCP
/*
 * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
 */
int tcp_jack_rxmt = TCP_RETRANSHZ / 2;
#endif /* MPTCP */

static boolean_t tcp_itimer_done = FALSE;
static void tcp_remove_timer(struct tcpcb *tp);
static void tcp_sched_timerlist(uint32_t offset);
static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
    u_int16_t probe_if_index);
static inline void tcp_set_lotimer_index(struct tcpcb *);
__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
__private_extern__ void tcp_report_stats(void);

static u_int64_t tcp_last_report_time;
/*
 * Structure to store previously reported stats so that we can send
 * incremental changes in each report interval.
 */
struct tcp_last_report_stats {
    u_int32_t tcps_connattempt;
    u_int32_t tcps_accepts;
    u_int32_t tcps_ecn_client_setup;
    u_int32_t tcps_ecn_server_setup;
    u_int32_t tcps_ecn_client_success;
    u_int32_t tcps_ecn_server_success;
    u_int32_t tcps_ecn_not_supported;
    u_int32_t tcps_ecn_lost_syn;
    u_int32_t tcps_ecn_lost_synack;
    u_int32_t tcps_ecn_recv_ce;
    u_int32_t tcps_ecn_recv_ece;
    u_int32_t tcps_ecn_sent_ece;
    u_int32_t tcps_ecn_conn_recv_ce;
    u_int32_t tcps_ecn_conn_recv_ece;
    u_int32_t tcps_ecn_conn_plnoce;
    u_int32_t tcps_ecn_conn_pl_ce;
    u_int32_t tcps_ecn_conn_nopl_ce;
    u_int32_t tcps_ecn_fallback_synloss;
    u_int32_t tcps_ecn_fallback_reorder;
    u_int32_t tcps_ecn_fallback_ce;

    /* TFO-related statistics */
    u_int32_t tcps_tfo_syn_data_rcv;
    u_int32_t tcps_tfo_cookie_req_rcv;
    u_int32_t tcps_tfo_cookie_sent;
    u_int32_t tcps_tfo_cookie_invalid;
    u_int32_t tcps_tfo_cookie_req;
    u_int32_t tcps_tfo_cookie_rcv;
    u_int32_t tcps_tfo_syn_data_sent;
    u_int32_t tcps_tfo_syn_data_acked;
    u_int32_t tcps_tfo_syn_loss;
    u_int32_t tcps_tfo_blackhole;
    u_int32_t tcps_tfo_cookie_wrong;
    u_int32_t tcps_tfo_no_cookie_rcv;
    u_int32_t tcps_tfo_heuristics_disable;
    u_int32_t tcps_tfo_sndblackhole;

    /* MPTCP-related statistics */
    u_int32_t tcps_mptcp_handover_attempt;
    u_int32_t tcps_mptcp_interactive_attempt;
    u_int32_t tcps_mptcp_aggregate_attempt;
    u_int32_t tcps_mptcp_fp_handover_attempt;
    u_int32_t tcps_mptcp_fp_interactive_attempt;
    u_int32_t tcps_mptcp_fp_aggregate_attempt;
    u_int32_t tcps_mptcp_heuristic_fallback;
    u_int32_t tcps_mptcp_fp_heuristic_fallback;
    u_int32_t tcps_mptcp_handover_success_wifi;
    u_int32_t tcps_mptcp_handover_success_cell;
    u_int32_t tcps_mptcp_interactive_success;
    u_int32_t tcps_mptcp_aggregate_success;
    u_int32_t tcps_mptcp_fp_handover_success_wifi;
    u_int32_t tcps_mptcp_fp_handover_success_cell;
    u_int32_t tcps_mptcp_fp_interactive_success;
    u_int32_t tcps_mptcp_fp_aggregate_success;
    u_int32_t tcps_mptcp_handover_cell_from_wifi;
    u_int32_t tcps_mptcp_handover_wifi_from_cell;
    u_int32_t tcps_mptcp_interactive_cell_from_wifi;
    u_int64_t tcps_mptcp_handover_cell_bytes;
    u_int64_t tcps_mptcp_interactive_cell_bytes;
    u_int64_t tcps_mptcp_aggregate_cell_bytes;
    u_int64_t tcps_mptcp_handover_all_bytes;
    u_int64_t tcps_mptcp_interactive_all_bytes;
    u_int64_t tcps_mptcp_aggregate_all_bytes;
    u_int32_t tcps_mptcp_back_to_wifi;
    u_int32_t tcps_mptcp_wifi_proxy;
    u_int32_t tcps_mptcp_cell_proxy;
    u_int32_t tcps_mptcp_triggered_cell;
};
/* Returns true if the timer is on the timer list */
#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)

/* Run the TCP timerlist at least once every hour */
#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)

static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
static boolean_t tcp_garbage_collect(struct inpcb *, int);

#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
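
/*
 * [Annotation, not in the original source] TIMERENTRY_TO_TP() is the usual
 * container-of idiom: it recovers the enclosing tcpcb from a pointer to the
 * embedded timer entry by subtracting the field's offset. For a tcpcb at
 * address A, the entry lives at A + offsetof(struct tcpcb,
 * tentry.le.le_next), so the subtraction yields A again.
 */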
#define VERIFY_NEXT_LINK(elm, field) do {                               \
    if (LIST_NEXT((elm),field) != NULL &&                               \
        LIST_NEXT((elm),field)->field.le_prev !=                        \
        &((elm)->field.le_next))                                        \
            panic("Bad link elm %p next->prev != elm", (elm));          \
} while(0)

#define VERIFY_PREV_LINK(elm, field) do {                               \
    if (*(elm)->field.le_prev != (elm))                                 \
            panic("Bad link elm %p prev->next != elm", (elm));          \
} while(0)

#define TCP_SET_TIMER_MODE(mode, i) do {                        \
    if (IS_TIMER_HZ_10MS(i))                                    \
            (mode) |= TCP_TIMERLIST_10MS_MODE;                  \
    else if (IS_TIMER_HZ_100MS(i))                              \
            (mode) |= TCP_TIMERLIST_100MS_MODE;                 \
    else                                                        \
            (mode) |= TCP_TIMERLIST_500MS_MODE;                 \
} while(0)
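
/*
 * [Annotation, not in the original source] The three timerlist modes track
 * the coarseness of the timers pending on each connection; e.g. a pending
 * delayed-ACK (100 ms granularity) keeps the list in 100 ms mode, while
 * keepalive/2MSL-class timers only require the lazy 500 ms mode. The
 * cumulative mode across all entries decides how often the list wakes up.
 */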
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
    "Medium MSS based on recommendation in link status report");
SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
    "Low MSS based on recommendation in link status report");
static int32_t tcp_change_mss_recommended = 0;
static int
sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int i, err = 0, changed = 0;
    struct ifnet *ifp;
    struct if_link_status ifsr;
    struct if_cellular_status_v1 *new_cell_sr;
    err = sysctl_io_number(req, tcp_change_mss_recommended,
        sizeof(int32_t), &i, &changed);
    if (changed) {
        ifnet_head_lock_shared();
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            if (IFNET_IS_CELLULAR(ifp)) {
                bzero(&ifsr, sizeof(ifsr));
                new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
                ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
                ifsr.ifsr_len = sizeof(*new_cell_sr);

                /* Set MSS recommended */
                new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
                new_cell_sr->mss_recommended = i;
                err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr));
                if (err == 0) {
                    tcp_change_mss_recommended = i;
                } else {
                    break;
                }
            }
        }
        ifnet_head_done();
    }
    return err;
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
    0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
    "Report stats interval");
#endif /* (DEVELOPMENT || DEBUG) */
/*
 * Macro to compare two timers. If there is a reset of the sign bit,
 * it is safe to assume that the timer has wrapped around. By doing
 * signed comparison, we take care of wrap around such that the value
 * with the sign bit reset is actually ahead of the other.
 */
static inline int32_t
timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
{
    return (int32_t)((t1 + toff1) - (t2 + toff2));
}
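
/*
 * [Annotation, not in the original source] Example of the wraparound
 * behavior: with t1 = 0xfffffff0 and t2 = 0x10 (t2 has wrapped past t1),
 * (int32_t)(t1 - t2) == -32, so t1 correctly compares as earlier even
 * though it is numerically larger as an unsigned value.
 */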
/*
 * Add to tcp timewait list, delay is given in milliseconds.
 */
static void
add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
{
    struct inpcbinfo *pcbinfo = &tcbinfo;
    struct inpcb *inp = tp->t_inpcb;
    uint32_t timer;

    /* pcb list should be locked when we get here */
    LCK_RW_ASSERT(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);

    /* We may get here multiple times, so check */
    if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
        pcbinfo->ipi_twcount++;
        inp->inp_flags2 |= INP2_TIMEWAIT;

        /* Remove from global inp list */
        LIST_REMOVE(inp, inp_list);
    } else {
        TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
    }

    /* Compute the time at which this socket can be closed */
    timer = tcp_now + delay;

    /* We will use the TCPT_2MSL timer for tracking this delay */

    if (TIMER_IS_ON_LIST(tp)) {
        tcp_remove_timer(tp);
    }
    tp->t_timer[TCPT_2MSL] = timer;

    TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
}
void
add_to_time_wait(struct tcpcb *tp, uint32_t delay)
{
    struct inpcbinfo *pcbinfo = &tcbinfo;
    if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
        socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
    }

    /* 19182803: Notify nstat that connection is closing before waiting. */
    nstat_pcb_detach(tp->t_inpcb);

    if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
        socket_unlock(tp->t_inpcb->inp_socket, 0);
        lck_rw_lock_exclusive(pcbinfo->ipi_lock);
        socket_lock(tp->t_inpcb->inp_socket, 0);
    }
    add_to_time_wait_locked(tp, delay);
    lck_rw_done(pcbinfo->ipi_lock);

    inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
}
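
/*
 * [Annotation, not in the original source] The try-lock dance above appears
 * to preserve the ipi_lock -> socket lock ordering: when the exclusive
 * try-lock fails, the socket lock is dropped, the list lock is taken for
 * real, and the socket lock is re-acquired before the timewait queue is
 * touched.
 */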
/* If this is on time wait queue, remove it. */
void
tcp_remove_from_time_wait(struct inpcb *inp)
{
    struct tcpcb *tp = intotcpcb(inp);
    if (inp->inp_flags2 & INP2_TIMEWAIT) {
        TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
    }
}
static boolean_t
tcp_garbage_collect(struct inpcb *inp, int istimewait)
{
    boolean_t active = FALSE;
    struct socket *so, *mp_so = NULL;
    struct tcpcb *tp;

    so = inp->inp_socket;
    tp = intotcpcb(inp);

    if (so->so_flags & SOF_MP_SUBFLOW) {
        mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
        if (!socket_try_lock(mp_so)) {
            mp_so = NULL;
            active = TRUE;
            goto out;
        }
        if (mpsotomppcb(mp_so)->mpp_inside > 0) {
            os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
                (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
                mpsotomppcb(mp_so)->mpp_inside,
                mp_so->so_usecount);
            socket_unlock(mp_so, 0);
            mp_so = NULL;
            active = TRUE;
            goto out;
        }
        /* We call socket_unlock with refcount further below */
        mp_so->so_usecount++;
        tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
    }

    /*
     * Skip if still in use or busy; it would have been more efficient
     * if we were to test so_usecount against 0, but this isn't possible
     * due to the current implementation of tcp_dropdropablreq() where
     * overflow sockets that are eligible for garbage collection have
     * their usecounts set to 1.
     */
    if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
        active = TRUE;
        goto out;
    }

    /* Check again under the lock */
    if (so->so_usecount > 1) {
        if (inp->inp_wantcnt == WNT_STOPUSING) {
            active = TRUE;
        }
        lck_mtx_unlock(&inp->inpcb_mtx);
        goto out;
    }

    if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
        tp->t_state != TCPS_CLOSED) {
        /* Become a regular mutex */
        lck_mtx_convert_spin(&inp->inpcb_mtx);
        tcp_close(tp);
    }

    /*
     * Overflowed socket dropped from the listening queue? Do this
     * only if we are called to clean up the time wait slots, since
     * tcp_dropdropablreq() considers a socket to have been fully
     * dropped after add_to_time_wait() is finished.
     * Also handle the case of connections getting closed by the peer
     * while in the queue as seen with rdar://6422317
     */
    if (so->so_usecount == 1 &&
        ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
        ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
        (so->so_head != NULL) &&
        ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
        (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
        if (inp->inp_state != INPCB_STATE_DEAD) {
            /* Become a regular mutex */
            lck_mtx_convert_spin(&inp->inpcb_mtx);
#if INET6
            if (SOCK_CHECK_DOM(so, PF_INET6)) {
                in6_pcbdetach(inp);
            } else
#endif /* INET6 */
            in_pcbdetach(inp);
        }
        VERIFY(so->so_usecount > 0);
        so->so_usecount--;
        if (inp->inp_wantcnt == WNT_STOPUSING) {
            active = TRUE;
        }
        lck_mtx_unlock(&inp->inpcb_mtx);
        goto out;
    } else if (inp->inp_wantcnt != WNT_STOPUSING) {
        lck_mtx_unlock(&inp->inpcb_mtx);
        active = FALSE;
        goto out;
    }

    /*
     * We get here because the PCB is no longer searchable
     * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
     * (usecount is 0). This covers all cases, including overflow
     * sockets and those that are considered as "embryonic",
     * i.e. created by sonewconn() in TCP input path, and have
     * not yet been committed. For the former, we reduce the usecount
     * to 0 as done by the code above. For the latter, the usecount
     * would have reduced to 0 as part of calling soabort() when the
     * socket is dropped at the end of tcp_input().
     */
    if (so->so_usecount == 0) {
        DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
            struct tcpcb *, tp, int32_t, TCPS_CLOSED);
        /* Become a regular mutex */
        lck_mtx_convert_spin(&inp->inpcb_mtx);

        /*
         * If this tp still happens to be on the timer list,
         * take it out of the list.
         */
        if (TIMER_IS_ON_LIST(tp)) {
            tcp_remove_timer(tp);
        }

        if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
            if (SOCK_CHECK_DOM(so, PF_INET6)) {
                in6_pcbdetach(inp);
            } else
#endif /* INET6 */
            in_pcbdetach(inp);
        }

        if (mp_so) {
            mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);

            /* so is now unlinked from mp_so - let's drop the lock */
            socket_unlock(mp_so, 1);
            mp_so = NULL;
        }

        in_pcbdispose(inp);
        goto out;
    }

    lck_mtx_unlock(&inp->inpcb_mtx);
    active = TRUE;

out:
    if (mp_so) {
        socket_unlock(mp_so, 1);
    }

    return active;
}
/*
 * TCP garbage collector callback (inpcb_timer_func_t).
 *
 * Returns the number of pcbs that will need to be gc-ed soon;
 * returning > 0 will keep the timer active.
 */
void
tcp_gc(struct inpcbinfo *ipi)
{
    struct inpcb *inp, *nxt;
    struct tcpcb *tw_tp, *tw_ntp;
#if TCPDEBUG
    int ostate;
#endif
#if KDEBUG
    static int tws_checked = 0;
#endif

    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);

    /*
     * Update tcp_now here as it may get used while
     * processing the slow timer.
     */
    calculate_tcp_clock();

    /*
     * Garbage collect socket/tcpcb: We need to acquire the list lock
     * exclusively to do this
     */
    if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
        /* don't sweat it this time; cleanup was done last time */
        if (tcp_gc_done == TRUE) {
            tcp_gc_done = FALSE;
            KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
                tws_checked, cur_tw_slot, 0, 0, 0);
            /* Lock upgrade failed, give up this round */
            atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
            return;
        }
        /* Upgrade failed, lost lock now take it again exclusive */
        lck_rw_lock_exclusive(ipi->ipi_lock);
    }
    tcp_gc_done = TRUE;

    LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
        if (tcp_garbage_collect(inp, 0)) {
            atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
        }
    }

    /* Now cleanup the time wait ones */
    TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
        /*
         * We check the timestamp here without holding the
         * socket lock for better performance. If there are
         * any pcbs in time-wait, the timer will get rescheduled.
         * Hence some error in this check can be tolerated.
         *
         * Sometimes a socket on time-wait queue can be closed if
         * 2MSL timer expired but the application still has a
         * usecount on it.
         */
        if (tw_tp->t_state == TCPS_CLOSED ||
            TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
            if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
                atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
            }
        }
    }

    /* take into account pcbs that are still in time_wait_slots */
    atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);

    lck_rw_done(ipi->ipi_lock);

    /* Clean up the socache while we are here */
    if (so_cache_timer()) {
        atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
    }

    KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
        cur_tw_slot, 0, 0, 0);
}
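
/*
 * [Annotation, not in the original source] The counts pushed into
 * ipi_gc_req above are what keeps the gc timer alive: intimer_fast asks
 * for another quick pass (e.g. when the exclusive lock could not be taken
 * or a pcb is still in use), while intimer_lazy covers pcbs parked in
 * time-wait that only need an eventual sweep.
 */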
/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(struct tcpcb *tp)
{
    int i;

    tcp_remove_timer(tp);
    for (i = 0; i < TCPT_NTIMERS; i++) {
        tp->t_timer[i] = 0;
    }
    tp->tentry.timer_start = tcp_now;
    tp->tentry.index = TCPT_NONE;
}
int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */
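
/*
 * [Annotation, not in the original source] The retransmit interval is
 * TCP_REXMTVAL(tp) scaled by tcp_backoff[t_rxtshift] and then clamped by
 * TCPT_RANGESET() at the call sites below. Assuming a smoothed RTO of
 * 200 ms, the third timeout (shift 3, factor 8) would target 1.6 s, still
 * subject to the t_rttmin floor and the TCPTV_REXMTMAX ceiling.
 */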
static void
tcp_rexmt_save_state(struct tcpcb *tp)
{
    u_int32_t fsize;
    if (TSTMP_SUPPORTED(tp)) {
        /*
         * Since timestamps are supported on the connection,
         * we can do recovery as described in rfc 4015.
         */
        fsize = tp->snd_max - tp->snd_una;
        tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
        tp->snd_recover_prev = tp->snd_recover;
    } else {
        /*
         * Timestamp option is not supported on this connection.
         * Record ssthresh and cwnd so they can
         * be recovered if this turns out to be a "bad" retransmit.
         * A retransmit is considered "bad" if an ACK for this
         * segment is received within RTT/2 interval; the assumption
         * here is that the ACK was already in flight. See
         * "On Estimating End-to-End Network Path Properties" by
         * Allman and Paxson for more details.
         */
        tp->snd_cwnd_prev = tp->snd_cwnd;
        tp->snd_ssthresh_prev = tp->snd_ssthresh;
        tp->snd_recover_prev = tp->snd_recover;
        if (IN_FASTRECOVERY(tp)) {
            tp->t_flags |= TF_WASFRECOVERY;
        } else {
            tp->t_flags &= ~TF_WASFRECOVERY;
        }
    }
    tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
    tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
    tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
}
/*
 * Revert to the older segment size if there is an indication that PMTU
 * blackhole detection was not needed.
 */
void
tcp_pmtud_revert_segment_size(struct tcpcb *tp)
{
    int32_t optlen;

    VERIFY(tp->t_pmtud_saved_maxopd > 0);
    tp->t_flags |= TF_PMTUD;
    tp->t_flags &= ~TF_BLACKHOLE;
    optlen = tp->t_maxopd - tp->t_maxseg;
    tp->t_maxopd = tp->t_pmtud_saved_maxopd;
    tp->t_maxseg = tp->t_maxopd - optlen;

    /*
     * Reset the slow-start flight size as it
     * may depend on the new MSS
     */
    if (CC_ALGO(tp)->cwnd_init != NULL) {
        CC_ALGO(tp)->cwnd_init(tp);
    }
    tp->t_pmtud_start_ts = 0;
    tcpstat.tcps_pmtudbh_reverted++;

    /* change MSS according to recommendation, if there was one */
    tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
}
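
/*
 * [Annotation, not in the original source] Sketch of the blackhole state
 * machine: on the second retransmit timeout the sender clamps t_maxopd to
 * tcp_pmtud_black_hole_mss (1200 by default) and clears TF_PMTUD; if
 * retransmits keep failing past shift 4, the routine above undoes the
 * clamp on the assumption that MTU was not the problem.
 */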
/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(struct tcpcb *tp, int timer)
{
    int32_t rexmt, optlen = 0, idle_time = 0;
    struct socket *so;
    struct tcptemp *t_template;
#if TCPDEBUG
    int ostate;
#endif
#if INET6
    int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
#endif /* INET6 */
    u_int64_t accsleep_ms;
    u_int32_t last_sleep_ms = 0;

    so = tp->t_inpcb->inp_socket;
    idle_time = tcp_now - tp->t_rcvtime;

    switch (timer) {
    /*
     * 2 MSL timeout in shutdown went off.  If we're closed but
     * still waiting for peer to close and connection has been idle
     * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
     * delete connection control block.
     * Otherwise, (this case shouldn't happen) check again in a bit
     * we keep the socket in the main list in that case.
     */
    case TCPT_2MSL:
        tcp_free_sackholes(tp);
        if (tp->t_state != TCPS_TIME_WAIT &&
            tp->t_state != TCPS_FIN_WAIT_2 &&
            ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
            tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
                (u_int32_t)TCP_CONN_KEEPINTVL(tp));
        } else {
            tp = tcp_close(tp);
            return tp;
        }
        break;

    /*
     * Retransmission timer went off.  Message has not
     * been acked within retransmit interval.  Back off
     * to a longer retransmit interval and retransmit one segment.
     */
    case TCPT_REXMT:
        absolutetime_to_nanoseconds(mach_absolutetime_asleep,
            &accsleep_ms);
        accsleep_ms = accsleep_ms / 1000000UL;
        if (accsleep_ms > tp->t_accsleep_ms) {
            last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
        }
        /*
         * Drop a connection in the retransmit timer
         * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
         *    times
         * 2. If the time spent in this retransmission episode is
         *    more than the time limit set with TCP_RXT_CONNDROPTIME
         *    socket option
         * 3. If TCP_RXT_FINDROP socket option was set and
         *    we have already retransmitted the FIN 3 times without
         *    receiving an ack
         */
        if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
            (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
            (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
            ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
            (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
            (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
            if (tp->t_state == TCPS_ESTABLISHED &&
                tp->t_rxt_minimum_timeout > 0) {
                /*
                 * Avoid dropping a connection if minimum
                 * timeout is set and that time did not
                 * pass. We will retry sending
                 * retransmissions at the maximum interval
                 */
                if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
                    tp->t_rxt_minimum_timeout))) {
                    tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
                    goto retransmit_packet;
                }
            }
            if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
                tcpstat.tcps_rxtfindrop++;
            } else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
                tcpstat.tcps_drop_after_sleep++;
            } else {
                tcpstat.tcps_timeoutdrop++;
            }
            if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
                if (TCP_ECN_ENABLED(tp)) {
                    INP_INC_IFNET_STAT(tp->t_inpcb,
                        ecn_on.rxmit_drop);
                } else {
                    INP_INC_IFNET_STAT(tp->t_inpcb,
                        ecn_off.rxmit_drop);
                }
            }
            tp->t_rxtshift = TCP_MAXRXTSHIFT;
            postevent(so, 0, EV_TIMEOUT);
            soevent(so,
                (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));

            if (TCP_ECN_ENABLED(tp) &&
                tp->t_state == TCPS_ESTABLISHED) {
                tcp_heuristic_ecn_droprxmt(tp);
            }

            tp = tcp_drop(tp, tp->t_softerror ?
                tp->t_softerror : ETIMEDOUT);
            break;
        }
retransmit_packet:
        tcpstat.tcps_rexmttimeo++;
        tp->t_accsleep_ms = accsleep_ms;

        if (tp->t_rxtshift == 1 &&
            tp->t_state == TCPS_ESTABLISHED) {
            /* Set the time at which retransmission started. */
            tp->t_rxtstart = tcp_now;

            /*
             * if this is the first retransmit timeout, save
             * the state so that we can recover if the timeout
             * is spurious.
             */
            tcp_rexmt_save_state(tp);
            tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
        }
#if MPTCP
        if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
            (tp->t_state == TCPS_ESTABLISHED) &&
            (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
            mptcp_act_on_txfail(so);
        }

        if (TCPS_HAVEESTABLISHED(tp->t_state) &&
            (so->so_flags & SOF_MP_SUBFLOW)) {
            struct mptses *mpte = tptomptp(tp)->mpt_mpte;

            if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) {
                mptcp_check_subflows_and_add(mpte);
            }
        }
#endif /* MPTCP */

        if (tp->t_adaptive_wtimo > 0 &&
            tp->t_rxtshift > tp->t_adaptive_wtimo &&
            TCPS_HAVEESTABLISHED(tp->t_state)) {
            /* Send an event to the application */
            soevent(so,
                (SO_FILT_HINT_LOCKED |
                SO_FILT_HINT_ADAPTIVE_WTIMO));
        }

        /*
         * If this is a retransmit timeout after PTO, the PTO
         * was not effective
         */
        if (tp->t_flagsext & TF_SENT_TLPROBE) {
            tp->t_flagsext &= ~(TF_SENT_TLPROBE);
            tcpstat.tcps_rto_after_pto++;
        }

        if (tp->t_flagsext & TF_DELAY_RECOVERY) {
            /*
             * Retransmit timer fired before entering recovery
             * on a connection with packet re-ordering. This
             * suggests that the reordering metrics computed
             * are not accurate.
             */
            tp->t_reorderwin = 0;
            tp->t_timer[TCPT_DELAYFR] = 0;
            tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
        }

        if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
            tp->t_state == TCPS_SYN_RECEIVED) {
            tcp_disable_tfo(tp);
        }

        if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
            !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
            (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
            !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
            ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
            tp->t_rxtshift > 4)) {
            /*
             * For regular retransmissions, a first one is being
             * done for tail-loss probe.
             * Thus, if rxtshift > 1, this means we have sent the segment
             * a total of 3 times.
             *
             * If we are in SYN-SENT state, then there is no tail-loss
             * probe thus we have to let rxtshift go up to 3.
             */
            tcp_heuristic_tfo_middlebox(tp);

            so->so_error = ENODATA;
            soevent(so,
                (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
            sorwakeup(so);
            sowwakeup(so);

            tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
            tcpstat.tcps_tfo_sndblackhole++;
        }

        if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
            !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
            (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
            tp->t_rxtshift > 3) {
            if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
                tcp_heuristic_tfo_middlebox(tp);

                so->so_error = ENODATA;
                soevent(so,
                    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
                sorwakeup(so);
                sowwakeup(so);
            }
        }

        if (tp->t_state == TCPS_SYN_SENT) {
            rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
            tp->t_stat.synrxtshift = tp->t_rxtshift;
            tp->t_stat.rxmitsyns++;

            /* When retransmitting, disable TFO */
            if (tfo_enabled(tp) &&
                !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
                tcp_disable_tfo(tp);
                tp->t_tfo_flags |= TFO_F_SYN_LOSS;
            }
        } else {
            rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
        }

        TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
            TCP_ADD_REXMTSLOP(tp));
        tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);

        TCP_LOG_RTT_INFO(tp);

        if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
            goto fc_output;
        }

        tcp_free_sackholes(tp);
        /*
         * Check for potential Path MTU Discovery Black Hole
         */
        if (tcp_pmtud_black_hole_detect &&
            !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
            (tp->t_state == TCPS_ESTABLISHED)) {
            if ((tp->t_flags & TF_PMTUD) &&
                ((tp->t_flags & TF_MAXSEGSNT)
                || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
                tp->t_rxtshift == 2) {
                /*
                 * Enter Path MTU Black-hole Detection mechanism:
                 * - Disable Path MTU Discovery (IP "DF" bit).
                 * - Reduce MTU to lower value than what we
                 *   negotiated with the peer.
                 */
                /* Disable Path MTU Discovery for now */
                tp->t_flags &= ~TF_PMTUD;
                /* Record that we may have found a black hole */
                tp->t_flags |= TF_BLACKHOLE;
                optlen = tp->t_maxopd - tp->t_maxseg;
                /* Keep track of previous MSS */
                tp->t_pmtud_saved_maxopd = tp->t_maxopd;
                tp->t_pmtud_start_ts = tcp_now;
                if (tp->t_pmtud_start_ts == 0) {
                    tp->t_pmtud_start_ts++;
                }
                /* Reduce the MSS to intermediary value */
                if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
                    tp->t_maxopd = tcp_pmtud_black_hole_mss;
                } else {
                    tp->t_maxopd = /* use the default MSS */
#if INET6
                        isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
                        tcp_mssdflt;
                }
                tp->t_maxseg = tp->t_maxopd - optlen;

                /*
                 * Reset the slow-start flight size
                 * as it may depend on the new MSS
                 */
                if (CC_ALGO(tp)->cwnd_init != NULL) {
                    CC_ALGO(tp)->cwnd_init(tp);
                }
                tp->snd_cwnd = tp->t_maxseg;
            }
            /*
             * If further retransmissions are still
             * unsuccessful with a lowered MTU, maybe this
             * isn't a Black Hole and we restore the previous
             * MSS and blackhole detection flags.
             */
            else {
                if ((tp->t_flags & TF_BLACKHOLE) &&
                    (tp->t_rxtshift > 4)) {
                    tcp_pmtud_revert_segment_size(tp);
                    tp->snd_cwnd = tp->t_maxseg;
                }
            }
        }

        /*
         * Disable rfc1323 and rfc1644 if we haven't got any
         * response to our SYN (after we reach the threshold)
         * to work-around some broken terminal servers (most of
         * which have hopefully been retired) that have bad VJ
         * header compression code which trashes TCP segments
         * containing unknown-to-them TCP options.
         * Do this only on non-local connections.
         */
        if (tp->t_state == TCPS_SYN_SENT &&
            tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
            tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC);
        }

        /*
         * If losing, let the lower level know and try for
         * a better route.  Also, if we backed off this far,
         * our srtt estimate is probably bogus.  Clobber it
         * so we'll take the next rtt measurement as our srtt;
         * move the current srtt into rttvar to keep the current
         * retransmit times until then.
         */
        if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#if INET6
            if (isipv6) {
                in6_losing(tp->t_inpcb);
            } else
#endif /* INET6 */
            in_losing(tp->t_inpcb);
            tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
            tp->t_srtt = 0;
        }
        tp->snd_nxt = tp->snd_una;
        /*
         * Note:  We overload snd_recover to function also as the
         * snd_last variable described in RFC 2582
         */
        tp->snd_recover = tp->snd_max;
        /*
         * Force a segment to be sent.
         */
        tp->t_flags |= TF_ACKNOW;

        /* If timing a segment in this window, stop the timer */
        tp->t_rtttime = 0;

        if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
            tcpstat.tcps_tailloss_rto++;
        }

        /*
         * RFC 5681 says: when a TCP sender detects segment loss
         * using retransmit timer and the given segment has already
         * been retransmitted by way of the retransmission timer at
         * least once, the value of ssthresh is held constant
         */
        if (tp->t_rxtshift == 1 &&
            CC_ALGO(tp)->after_timeout != NULL) {
            CC_ALGO(tp)->after_timeout(tp);
            /*
             * CWR notifications are to be sent on new data
             * right after Fast Retransmits and ECE
             * notification receipts.
             */
            if (TCP_ECN_ENABLED(tp)) {
                tp->ecn_flags |= TE_SENDCWR;
            }
        }

        EXIT_FASTRECOVERY(tp);

        /* Exit cwnd non validated phase */
        tp->t_flagsext &= ~TF_CWND_NONVALIDATED;

fc_output:
        tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);

        (void) tcp_output(tp);
        break;

    /*
     * Persistence timer into zero window.
     * Force a byte to be output, if possible.
     */
    case TCPT_PERSIST:
        tcpstat.tcps_persisttimeo++;
        /*
         * Hack: if the peer is dead/unreachable, we do not
         * time out if the window is closed.  After a full
         * backoff, drop the connection if the idle time
         * (no responses to probes) reaches the maximum
         * backoff that we would use if retransmitting.
         *
         * Drop the connection if we reached the maximum allowed time for
         * Zero Window Probes without a non-zero update from the peer.
         * See rdar://5805356
         */
        if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
            (idle_time >= tcp_maxpersistidle ||
            idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
            ((tp->t_persist_stop != 0) &&
            TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
            tcpstat.tcps_persistdrop++;
            postevent(so, 0, EV_TIMEOUT);
            soevent(so,
                (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
            tp = tcp_drop(tp, ETIMEDOUT);
            break;
        }
        tcp_setpersist(tp);
        tp->t_flagsext |= TF_FORCE;
        (void) tcp_output(tp);
        tp->t_flagsext &= ~TF_FORCE;
        break;

    /*
     * Keep-alive timer went off; send something
     * or drop connection if idle for too long.
     */
    case TCPT_KEEP:
        tcpstat.tcps_keeptimeo++;
#if MPTCP
        /*
         * Regular TCP connections do not send keepalives after closing;
         * MPTCP must not either, after sending Data FINs.
         */
        struct mptcb *mp_tp = tptomptp(tp);
        if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
            (tp->t_state > TCPS_ESTABLISHED)) {
            goto dropit;
        } else if (mp_tp != NULL) {
            if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
                goto dropit;
            }
        }
#endif /* MPTCP */
        if (tp->t_state < TCPS_ESTABLISHED) {
            goto dropit;
        }
        if ((always_keepalive ||
            (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
            (tp->t_flagsext & TF_DETECT_READSTALL) ||
            (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
            (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
            if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
                goto dropit;
            }
            /*
             * Send a packet designed to force a response
             * if the peer is up and reachable:
             * either an ACK if the connection is still alive,
             * or an RST if the peer has closed the connection
             * due to timeout or reboot.
             * Using sequence number tp->snd_una-1
             * causes the transmitted zero-length segment
             * to lie outside the receive window;
             * by the protocol spec, this requires the
             * correspondent TCP to respond.
             */
            tcpstat.tcps_keepprobe++;
            t_template = tcp_maketemplate(tp);
            if (t_template) {
                struct inpcb *inp = tp->t_inpcb;
                struct tcp_respond_args tra;

                bzero(&tra, sizeof(tra));
                tra.nocell = INP_NO_CELLULAR(inp);
                tra.noexpensive = INP_NO_EXPENSIVE(inp);
                tra.noconstrained = INP_NO_CONSTRAINED(inp);
                tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
                tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
                if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
                    tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
                } else {
                    tra.ifscope = IFSCOPE_NONE;
                }
                tcp_respond(tp, t_template->tt_ipgen,
                    &t_template->tt_t, (struct mbuf *)NULL,
                    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
                (void) m_free(dtom(t_template));
                if (tp->t_flagsext & TF_DETECT_READSTALL) {
                    tp->t_rtimo_probes++;
                }
            }

            TCP_LOG_KEEP_ALIVE(tp, idle_time);

            tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
                TCP_CONN_KEEPINTVL(tp));
        } else {
            tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
                TCP_CONN_KEEPIDLE(tp));
        }
        if (tp->t_flagsext & TF_DETECT_READSTALL) {
            struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
            bool reenable_probe = false;
            /*
             * The keep alive packets sent to detect a read
             * stall did not get a response from the
             * peer. Generate more keep-alives to confirm this.
             * If the number of probes sent reaches the limit,
             * generate an event.
             */
            if (tp->t_adaptive_rtimo > 0) {
                if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
                    /* Generate an event */
                    soevent(so,
                        (SO_FILT_HINT_LOCKED |
                        SO_FILT_HINT_ADAPTIVE_RTIMO));
                    tcp_keepalive_reset(tp);
                } else {
                    reenable_probe = true;
                }
            } else if (outifp != NULL &&
                (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
                tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
                reenable_probe = true;
            } else {
                tp->t_flagsext &= ~TF_DETECT_READSTALL;
            }
            if (reenable_probe) {
                int ind = min(tp->t_rtimo_probes,
                    TCP_MAXRXTSHIFT);
                tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
                    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
            }
        }
        if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
            int ind;

            tp->t_tfo_probes++;
            ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);

            /*
             * We take the minimum among the time set by true
             * keepalive (see above) and the backoff'd RTO. That
             * way we backoff in case of packet-loss but will never
             * timeout slower than regular keepalive due to the
             * keeping alive probes.
             */
            tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
                tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
                tp->t_timer[TCPT_KEEP]);
        } else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
            !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
            tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
            /* Still no data! Let's assume a TFO-error and err out... */
            tcp_heuristic_tfo_middlebox(tp);

            so->so_error = ENODATA;
            soevent(so,
                (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
            sorwakeup(so);
            tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
            tcpstat.tcps_tfo_blackhole++;
        }
        break;
    case TCPT_DELACK:
        if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
            tp->t_flags &= ~TF_DELACK;
            tp->t_timer[TCPT_DELACK] = 0;
            tp->t_flags |= TF_ACKNOW;

            /*
             * If delayed ack timer fired while stretching
             * acks, count the number of times the streaming
             * detection was not correct. If this exceeds a
             * threshold, disable stretch ack on this
             * connection.
             *
             * Also, go back to acking every other packet.
             */
            if ((tp->t_flags & TF_STRETCHACK)) {
                if (tp->t_unacksegs > 1 &&
                    tp->t_unacksegs < maxseg_unacked) {
                    tp->t_stretchack_delayed++;
                }

                if (tp->t_stretchack_delayed >
                    TCP_STRETCHACK_DELAY_THRESHOLD) {
                    tp->t_flagsext |= TF_DISABLE_STRETCHACK;
                    /*
                     * Note the time at which stretch
                     * ack was disabled automatically
                     */
                    tp->rcv_nostrack_ts = tcp_now;
                    tcpstat.tcps_nostretchack++;
                    tp->t_stretchack_delayed = 0;
                    tp->rcv_nostrack_pkts = 0;
                }
                tcp_reset_stretch_ack(tp);
            }

            /*
             * If we are measuring inter packet arrival jitter
             * for throttling a connection, this delayed ack
             * might be the reason for accumulating some
             * jitter. So let's restart the measurement.
             */
            CLEAR_IAJ_STATE(tp);

            tcpstat.tcps_delack++;
            (void) tcp_output(tp);
        }
        break;

#if MPTCP
    case TCPT_JACK_RXMT:
        if ((tp->t_state == TCPS_ESTABLISHED) &&
            (tp->t_mpflags & TMPF_PREESTABLISHED) &&
            (tp->t_mpflags & TMPF_JOINED_FLOW)) {
            if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
                tcpstat.tcps_timeoutdrop++;
                postevent(so, 0, EV_TIMEOUT);
                soevent(so,
                    (SO_FILT_HINT_LOCKED |
                    SO_FILT_HINT_TIMEOUT));
                tp = tcp_drop(tp, tp->t_softerror ?
                    tp->t_softerror : ETIMEDOUT);
                break;
            }
            tcpstat.tcps_join_rxmts++;
            tp->t_mpflags |= TMPF_SND_JACK;
            tp->t_flags |= TF_ACKNOW;

            /*
             * No backoff is implemented for simplicity for this
             * corner case.
             */
            (void) tcp_output(tp);
        }
        break;
    case TCPT_CELLICON:
    {
        struct mptses *mpte = tptomptp(tp)->mpt_mpte;

        tp->t_timer[TCPT_CELLICON] = 0;

        if (mpte->mpte_cellicon_increments == 0) {
            /* Cell-icon not set by this connection */
            break;
        }

        if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
            mptcp_unset_cellicon(mpte, NULL, 1);
        }

        if (mpte->mpte_cellicon_increments) {
            tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
        }

        break;
    }
#endif /* MPTCP */

    case TCPT_PTO:
    {
        int32_t ret = 0;

        if (!(tp->t_flagsext & TF_IF_PROBING)) {
            tp->t_flagsext &= ~(TF_SENT_TLPROBE);
        }
        /*
         * Check if the connection is in the right state to
         * send a probe
         */
        if ((tp->t_state != TCPS_ESTABLISHED ||
            tp->t_rxtshift > 0 ||
            tp->snd_max == tp->snd_una ||
            !SACK_ENABLED(tp) ||
            !TAILQ_EMPTY(&tp->snd_holes) ||
            IN_FASTRECOVERY(tp)) &&
            !(tp->t_flagsext & TF_IF_PROBING)) {
            break;
        }

        /*
         * When the interface state is changed explicitly reset the retransmission
         * timer state for both SYN and data packets because we do not want to
         * wait unnecessarily or timeout too quickly if the link characteristics
         * have changed drastically
         */
        if (tp->t_flagsext & TF_IF_PROBING) {
            tp->t_rxtshift = 0;
            if (tp->t_state == TCPS_SYN_SENT) {
                tp->t_stat.synrxtshift = tp->t_rxtshift;
            }
            /*
             * Reset to the default RTO
             */
            tp->t_srtt = TCPTV_SRTTBASE;
            tp->t_rttvar =
                ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
            tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN :
                TCPTV_REXMTMIN;
            TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
                tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
            TCP_LOG_RTT_INFO(tp);
        }

        if (tp->t_state == TCPS_SYN_SENT) {
            /*
             * The PTO for SYN_SENT reinitializes TCP as if it was a fresh
             * connection attempt
             */
            tp->snd_nxt = tp->snd_una;
            /*
             * Note:  We overload snd_recover to function also as the
             * snd_last variable described in RFC 2582
             */
            tp->snd_recover = tp->snd_max;
            /*
             * Force a segment to be sent.
             */
            tp->t_flags |= TF_ACKNOW;

            /* If timing a segment in this window, stop the timer */
            tp->t_rtttime = 0;
        } else {
            int32_t snd_len;

            /*
             * If there is no new data to send or if the
             * connection is limited by receive window then
             * retransmit the last segment, otherwise send
             * new data.
             */
            snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
                - (tp->snd_max - tp->snd_una);
            if (snd_len > 0) {
                tp->snd_nxt = tp->snd_max;
            } else {
                snd_len = min((tp->snd_max - tp->snd_una),
                    tp->t_maxseg);
                tp->snd_nxt = tp->snd_max - snd_len;
            }
        }

        if (tp->t_flagsext & TF_IF_PROBING) {
            tcpstat.tcps_probe_if++;
        }

        /* If timing a segment in this window, stop the timer */
        tp->t_rtttime = 0;
        /* Note that tail loss probe is being sent. Exclude IF probe */
        if (!(tp->t_flagsext & TF_IF_PROBING)) {
            tp->t_flagsext |= TF_SENT_TLPROBE;
            tp->t_tlpstart = tcp_now;
        }

        tp->snd_cwnd += tp->t_maxseg;
        /*
         * When tail-loss-probe fires, we reset the RTO timer, because
         * a probe just got sent, so we are good to push out the timer.
         *
         * Set to 0 to ensure that tcp_output() will reschedule it
         */
        tp->t_timer[TCPT_REXMT] = 0;
        ret = tcp_output(tp);

#if (DEBUG || DEVELOPMENT)
        if ((tp->t_flagsext & TF_IF_PROBING) &&
            ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
            tp->t_state == TCPS_SYN_SENT)) {
            if (ret == 0 && tcp_probe_if_fix_port > 0 &&
                tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
                tp->t_timer[TCPT_REXMT] = 0;
                tcp_set_lotimer_index(tp);
            }

            os_log(OS_LOG_DEFAULT,
                "%s: sent %s probe for %u > %u on interface %s"
                " (%u) %s(%d)",
                __func__,
                tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
                ntohs(tp->t_inpcb->inp_lport),
                ntohs(tp->t_inpcb->inp_fport),
                if_name(tp->t_inpcb->inp_last_outifp),
                tp->t_inpcb->inp_last_outifp->if_index,
                ret == 0 ? "succeeded" :"failed", ret);
        }
#endif /* DEBUG || DEVELOPMENT */

        /*
         * When the connection is not idle, make sure the retransmission timer
         * is armed because it was set to zero above
         */
        if ((tp->t_timer[TCPT_REXMT] == 0 || tp->t_timer[TCPT_PERSIST] == 0) &&
            (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
            tp->t_state == TCPS_SYN_RECEIVED)) {
            tp->t_timer[TCPT_REXMT] =
                OFFSET_FROM_START(tp, tp->t_rxtcur);

            os_log(OS_LOG_DEFAULT,
                "%s: tcp_output() returned %u with retransmission timer disabled "
                "for %u > %u in state %d, reset timer to %d",
                __func__, ret,
                ntohs(tp->t_inpcb->inp_lport),
                ntohs(tp->t_inpcb->inp_fport),
                tp->t_state,
                tp->t_timer[TCPT_REXMT]);

            tcp_check_timer_state(tp);
        }
        tp->snd_cwnd -= tp->t_maxseg;

        if (!(tp->t_flagsext & TF_IF_PROBING)) {
            tp->t_tlphighrxt = tp->snd_nxt;
        }
        break;
    }
    case TCPT_DELAYFR:
        tp->t_flagsext &= ~TF_DELAY_RECOVERY;

        /*
         * Don't do anything if one of the following is true:
         * - the connection is already in recovery
         * - sequence until snd_recover has been acknowledged.
         * - retransmit timeout has fired
         */
        if (IN_FASTRECOVERY(tp) ||
            SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
            tp->t_rxtshift > 0) {
            break;
        }

        VERIFY(SACK_ENABLED(tp));
        tcp_rexmt_save_state(tp);
        if (CC_ALGO(tp)->pre_fr != NULL) {
            CC_ALGO(tp)->pre_fr(tp);
            if (TCP_ECN_ENABLED(tp)) {
                tp->ecn_flags |= TE_SENDCWR;
            }
        }
        ENTER_FASTRECOVERY(tp);

        tp->t_timer[TCPT_REXMT] = 0;
        tcpstat.tcps_sack_recovery_episode++;
        tp->t_sack_recovery_episode++;
        tp->sack_newdata = tp->snd_nxt;
        tp->snd_cwnd = tp->t_maxseg;
        tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
        (void) tcp_output(tp);
        break;
dropit:
        tcpstat.tcps_keepdrops++;
        postevent(so, 0, EV_TIMEOUT);
        soevent(so,
            (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
        tp = tcp_drop(tp, ETIMEDOUT);
        break;
    }
#if TCPDEBUG
    if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) {
        tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
            PRU_SLOWTIMO);
    }
#endif
    return tp;
}
/* Remove a timer entry from timer list */
static void
tcp_remove_timer(struct tcpcb *tp)
{
    struct tcptimerlist *listp = &tcp_timer_list;

    socket_lock_assert_owned(tp->t_inpcb->inp_socket);
    if (!(TIMER_IS_ON_LIST(tp))) {
        return;
    }
    lck_mtx_lock(listp->mtx);

    /* Check if pcb is on timer list again after acquiring the lock */
    if (!(TIMER_IS_ON_LIST(tp))) {
        lck_mtx_unlock(listp->mtx);
        return;
    }

    if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
        listp->next_te = LIST_NEXT(&tp->tentry, le);
    }

    LIST_REMOVE(&tp->tentry, le);
    tp->t_flags &= ~(TF_TIMER_ONLIST);

    listp->entries--;

    tp->tentry.le.le_next = NULL;
    tp->tentry.le.le_prev = NULL;
    lck_mtx_unlock(listp->mtx);
}
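
/*
 * [Annotation, not in the original source] The next_te fixup above is what
 * lets tcp_run_timerlist() drop the list lock while running a connection's
 * timers: if the entry it cached as "next" is removed here in the meantime,
 * the cursor is advanced so iteration can resume safely.
 */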
/*
 * Function to check if the timerlist needs to be rescheduled to run
 * the timer entry correctly. Basically, this is to check if we can avoid
 * taking the list lock.
 */
static boolean_t
need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
{
    struct tcptimerlist *listp = &tcp_timer_list;
    int32_t diff;

    /*
     * If the list is being processed then the state of the list is
     * in flux. In this case always acquire the lock and set the state
     * correctly.
     */
    if (listp->running) {
        return TRUE;
    }

    if (!listp->scheduled) {
        return TRUE;
    }

    diff = timer_diff(listp->runtime, 0, runtime, 0);
    if (diff <= 0) {
        /* The list is going to run before this timer */
        return FALSE;
    } else {
        if (mode & TCP_TIMERLIST_10MS_MODE) {
            if (diff <= TCP_TIMER_10MS_QUANTUM) {
                return FALSE;
            }
        } else if (mode & TCP_TIMERLIST_100MS_MODE) {
            if (diff <= TCP_TIMER_100MS_QUANTUM) {
                return FALSE;
            }
        } else {
            if (diff <= TCP_TIMER_500MS_QUANTUM) {
                return FALSE;
            }
        }
    }
    return TRUE;
}
static void
tcp_sched_timerlist(uint32_t offset)
{
    uint64_t deadline = 0;
    struct tcptimerlist *listp = &tcp_timer_list;

    LCK_MTX_ASSERT(listp->mtx, LCK_MTX_ASSERT_OWNED);

    offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
    listp->runtime = tcp_now + offset;
    listp->schedtime = tcp_now;
    if (listp->runtime == 0) {
        listp->runtime++;
    }

    clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);

    thread_call_enter_delayed(listp->call, deadline);
    listp->scheduled = TRUE;
}
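
/*
 * [Annotation, not in the original source] offset is in TCP_RETRANSHZ
 * ticks (milliseconds), so the USEC_PER_SEC scale factor hands
 * clock_interval_to_deadline() 1,000,000 ns per tick, i.e. 1 ms. The
 * clamp to TCP_TIMERLIST_MAX_OFFSET guarantees the list runs at least
 * hourly.
 */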
/*
 * Function to run the timers for a connection.
 *
 * Returns the offset of next timer to be run for this connection which
 * can be used to reschedule the timerlist.
 *
 * te_mode is an out parameter that indicates the modes of active
 * timers for this connection.
 */
static u_int32_t
tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
    u_int16_t probe_if_index)
{
	struct socket *so;
	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
	u_int32_t timer_val, offset = 0, lo_timer = 0;
	int32_t diff;
	boolean_t needtorun[TCPT_NTIMERS];
	int count = 0;

	VERIFY(tp != NULL);
	bzero(needtorun, sizeof(needtorun));
	*te_mode = 0;

	socket_lock(tp->t_inpcb->inp_socket, 1);

	so = tp->t_inpcb->inp_socket;
	/* Release the want count on inp */
	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
	    == WNT_STOPUSING) {
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}

		/* Looks like the TCP connection got closed while we
		 * were waiting for the lock.. Done
		 */
		goto done;
	}

	/*
	 * If this connection is over an interface that needs to
	 * be probed, send probe packets to reinitiate communication.
	 */
	if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
		tp->t_flagsext |= TF_IF_PROBING;
		tcp_timers(tp, TCPT_PTO);
		tp->t_timer[TCPT_PTO] = 0;
		tp->t_flagsext &= ~TF_IF_PROBING;
	}

	/*
	 * Since the timer thread needs to wait for tcp lock, it may race
	 * with another thread that can cancel or reschedule the timer
	 * that is about to run. Check if we need to run anything.
	 */
	if ((index = tp->tentry.index) == TCPT_NONE) {
		goto done;
	}

	timer_val = tp->t_timer[index];

	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
	if (diff > 0) {
		if (tp->tentry.index != TCPT_NONE) {
			offset = diff;
			*(te_mode) = tp->tentry.mode;
		}
		goto done;
	}

	tp->t_timer[index] = 0;
	if (timer_val > 0) {
		tp = tcp_timers(tp, index);
		if (tp == NULL) {
			goto done;
		}
	}

	/*
	 * Check if there are any other timers that need to be run.
	 * While doing it, adjust the timer values wrt tcp_now.
	 */
	tp->tentry.mode = 0;
	for (i = 0; i < TCPT_NTIMERS; ++i) {
		if (tp->t_timer[i] != 0) {
			diff = timer_diff(tp->tentry.timer_start,
			    tp->t_timer[i], tcp_now, 0);
			if (diff <= 0) {
				needtorun[i] = TRUE;
				count++;
			} else {
				tp->t_timer[i] = diff;
				needtorun[i] = FALSE;
				if (lo_timer == 0 || diff < lo_timer) {
					lo_timer = diff;
					lo_index = i;
				}
				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
			}
		}
	}

	tp->tentry.timer_start = tcp_now;
	tp->tentry.index = lo_index;
	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);

	if (tp->tentry.index != TCPT_NONE) {
		tp->tentry.runtime = tp->tentry.timer_start +
		    tp->t_timer[tp->tentry.index];
		if (tp->tentry.runtime == 0) {
			tp->tentry.runtime++;
		}
	}

	if (count > 0) {
		/* run any other timers outstanding at this time. */
		for (i = 0; i < TCPT_NTIMERS; ++i) {
			if (needtorun[i]) {
				tp->t_timer[i] = 0;
				tp = tcp_timers(tp, i);
				if (tp == NULL) {
					offset = 0;
					*(te_mode) = 0;
					goto done;
				}
			}
		}
		tcp_set_lotimer_index(tp);
	}

	if (tp->tentry.index < TCPT_NONE) {
		offset = tp->t_timer[tp->tentry.index];
		*(te_mode) = tp->tentry.mode;
	}

done:
	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
		tcp_remove_timer(tp);
		offset = 0;
	}

	socket_unlock(so, 1);
	return offset;
}
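/*
 * Sketch of the two-pass pattern used above (names are illustrative, not
 * kernel API): expired timers are only marked in the first pass so the
 * entry's bookkeeping (lowest timer, runtime) can be settled before any
 * handler runs, since a handler may itself rearm or clear timers.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define NTIMERS 7  /* assumed; stands in for TCPT_NTIMERS */

static void
sketch_run_timers(uint32_t timers[NTIMERS], uint32_t now, uint32_t start,
    void (*fire)(int which))
{
	bool needtorun[NTIMERS] = { false };
	int i;

	/* Pass 1: classify; only record which timers expired. */
	for (i = 0; i < NTIMERS; i++) {
		if (timers[i] != 0 &&
		    (int32_t)(start + timers[i] - now) <= 0) {
			needtorun[i] = true;
		}
	}
	/* Pass 2: fire handlers only after the scan is complete. */
	for (i = 0; i < NTIMERS; i++) {
		if (needtorun[i]) {
			timers[i] = 0;
			fire(i);
		}
	}
}
#endif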
void
tcp_run_timerlist(void * arg1, void * arg2)
{
#pragma unused(arg1, arg2)
	struct tcptimerentry *te, *next_te;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct tcpcb *tp;
	uint32_t next_timer = 0; /* offset of the next timer on the list */
	u_int16_t te_mode = 0;  /* modes of all active timers in a tcpcb */
	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
	uint32_t active_count = 0;

	calculate_tcp_clock();

	lck_mtx_lock(listp->mtx);

	int32_t drift = tcp_now - listp->runtime;
	if (drift <= 1) {
		tcpstat.tcps_timer_drift_le_1_ms++;
	} else if (drift <= 10) {
		tcpstat.tcps_timer_drift_le_10_ms++;
	} else if (drift <= 20) {
		tcpstat.tcps_timer_drift_le_20_ms++;
	} else if (drift <= 50) {
		tcpstat.tcps_timer_drift_le_50_ms++;
	} else if (drift <= 100) {
		tcpstat.tcps_timer_drift_le_100_ms++;
	} else if (drift <= 200) {
		tcpstat.tcps_timer_drift_le_200_ms++;
	} else if (drift <= 500) {
		tcpstat.tcps_timer_drift_le_500_ms++;
	} else if (drift <= 1000) {
		tcpstat.tcps_timer_drift_le_1000_ms++;
	} else {
		tcpstat.tcps_timer_drift_gt_1000_ms++;
	}

	listp->running = TRUE;

	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
		uint32_t offset = 0;
		uint32_t runtime = te->runtime;

		tp = TIMERENTRY_TO_TP(te);

		/*
		 * An interface probe may need to happen before the previously scheduled runtime
		 */
		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
		    !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
			offset = timer_diff(runtime, 0, tcp_now, 0);
			if (next_timer == 0 || offset < next_timer) {
				next_timer = offset;
			}
			list_mode |= te->mode;
			continue;
		}

		/*
		 * Acquire an inp wantcnt on the inpcb so that the socket
		 * won't get detached even if tcp_close is called
		 */
		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
		    == WNT_STOPUSING) {
			/*
			 * Some how this pcb went into dead state while
			 * on the timer list, just take it off the list.
			 * Since the timer list entry pointers are
			 * protected by the timer list lock, we can
			 * do it here without the socket lock.
			 */
			if (TIMER_IS_ON_LIST(tp)) {
				tp->t_flags &= ~(TF_TIMER_ONLIST);
				LIST_REMOVE(&tp->tentry, le);
				listp->entries--;

				tp->tentry.le.le_next = NULL;
				tp->tentry.le.le_prev = NULL;
			}
			continue;
		}
		active_count++;

		/*
		 * Store the next timerentry pointer before releasing the
		 * list lock. If that entry has to be removed when we
		 * release the lock, this pointer will be updated to the
		 * element after that.
		 */
		listp->next_te = next_te;

		VERIFY_NEXT_LINK(&tp->tentry, le);
		VERIFY_PREV_LINK(&tp->tentry, le);

		lck_mtx_unlock(listp->mtx);

		offset = tcp_run_conn_timer(tp, &te_mode,
		    listp->probe_if_index);

		lck_mtx_lock(listp->mtx);

		next_te = listp->next_te;
		listp->next_te = NULL;

		if (offset > 0 && te_mode != 0) {
			list_mode |= te_mode;

			if (next_timer == 0 || offset < next_timer) {
				next_timer = offset;
			}
		}
	}

	if (!LIST_EMPTY(&listp->lhead)) {
		u_int16_t next_mode = 0;
		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
			next_mode = TCP_TIMERLIST_10MS_MODE;
		} else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
			next_mode = TCP_TIMERLIST_100MS_MODE;
		} else {
			next_mode = TCP_TIMERLIST_500MS_MODE;
		}

		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
			listp->idleruns = 0;
		} else {
			/*
			 * the next required mode is slow mode, but if
			 * the last one was a faster mode and we did not
			 * have enough idle runs, repeat the last mode.
			 *
			 * We try to keep the timer list in fast mode for
			 * some idle time in expectation of new data.
			 */
			if (listp->mode != next_mode &&
			    listp->idleruns < timer_fastmode_idlemax) {
				listp->idleruns++;
				next_mode = listp->mode;
				next_timer = TCP_TIMER_100MS_QUANTUM;
			} else {
				listp->idleruns = 0;
			}
		}
		listp->mode = next_mode;
		if (listp->pref_offset != 0) {
			next_timer = min(listp->pref_offset, next_timer);
		}

		if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
			next_timer = max(next_timer,
			    TCP_TIMER_500MS_QUANTUM);
		}

		tcp_sched_timerlist(next_timer);
	} else {
		/*
		 * No need to reschedule this timer, but always run
		 * periodically at a much higher granularity.
		 */
		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
	}

	listp->running = FALSE;
	listp->pref_mode = 0;
	listp->pref_offset = 0;
	listp->probe_if_index = 0;

	lck_mtx_unlock(listp->mtx);
}
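/*
 * Sketch of the idle-run hysteresis above (illustrative names): after demand
 * drops to 500ms mode, the list keeps running at the previous faster mode
 * for up to timer_fastmode_idlemax idle passes before it is allowed to fall
 * back, in expectation of new data arriving soon.
 */
#if 0
#include <stdint.h>

enum mode { MODE_10MS, MODE_100MS, MODE_500MS };

static enum mode
sketch_pick_mode(enum mode cur, enum mode wanted, uint32_t *idleruns,
    uint32_t idlemax)
{
	if (wanted != MODE_500MS) {
		*idleruns = 0;
		return wanted;
	}
	if (cur != MODE_500MS && *idleruns < idlemax) {
		(*idleruns)++;
		return cur;  /* repeat the faster mode one more round */
	}
	*idleruns = 0;
	return MODE_500MS;
}
#endif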
/*
 * Function to check if the timerlist needs to be rescheduled to run this
 * connection's timers correctly.
 */
void
tcp_sched_timers(struct tcpcb *tp)
{
	struct tcptimerentry *te = &tp->tentry;
	u_int16_t index = te->index;
	u_int16_t mode = te->mode;
	struct tcptimerlist *listp = &tcp_timer_list;
	int32_t offset = 0;
	boolean_t list_locked = FALSE;

	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
		/* Just return without adding the dead pcb to the list */
		if (TIMER_IS_ON_LIST(tp)) {
			tcp_remove_timer(tp);
		}
		return;
	}

	if (index == TCPT_NONE) {
		/* Nothing to run */
		tcp_remove_timer(tp);
		return;
	}

	/*
	 * compute the offset at which the next timer for this connection
	 * has to run.
	 */
	offset = timer_diff(te->runtime, 0, tcp_now, 0);
	if (offset <= 0) {
		offset = 1;
		tcp_timer_advanced++;
	}

	if (!TIMER_IS_ON_LIST(tp)) {
		if (!list_locked) {
			lck_mtx_lock(listp->mtx);
			list_locked = TRUE;
		}

		if (!TIMER_IS_ON_LIST(tp)) {
			LIST_INSERT_HEAD(&listp->lhead, te, le);
			tp->t_flags |= TF_TIMER_ONLIST;

			listp->entries++;
			if (listp->entries > listp->maxentries) {
				listp->maxentries = listp->entries;
			}

			/* if the list is not scheduled, just schedule it */
			if (!listp->scheduled) {
				goto schedule;
			}
		}
	}

	/*
	 * Timer entry is currently on the list, check if the list needs
	 * to be rescheduled.
	 */
	if (need_to_resched_timerlist(te->runtime, mode)) {
		tcp_resched_timerlist++;

		if (!list_locked) {
			lck_mtx_lock(listp->mtx);
			list_locked = TRUE;
		}

		VERIFY_NEXT_LINK(te, le);
		VERIFY_PREV_LINK(te, le);

		if (listp->running) {
			listp->pref_mode |= mode;
			if (listp->pref_offset == 0 ||
			    offset < listp->pref_offset) {
				listp->pref_offset = offset;
			}
		} else {
			/*
			 * The list could have got rescheduled while
			 * this thread was waiting for the lock
			 */
			if (listp->scheduled) {
				int32_t diff;
				diff = timer_diff(listp->runtime, 0,
				    tcp_now, offset);
				if (diff <= 0) {
					goto done;
				} else {
					goto schedule;
				}
			} else {
				goto schedule;
			}
		}
	}
	goto done;

schedule:
	/*
	 * Since a connection with timers is getting scheduled, the timer
	 * list moves from idle to active state and that is why idlegen is
	 * reset
	 */
	if (mode & TCP_TIMERLIST_10MS_MODE) {
		listp->mode = TCP_TIMERLIST_10MS_MODE;
		listp->idleruns = 0;
		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
		if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
			listp->mode = TCP_TIMERLIST_100MS_MODE;
		}
		listp->idleruns = 0;
		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
	}
	tcp_sched_timerlist(offset);

done:
	if (list_locked) {
		lck_mtx_unlock(listp->mtx);
	}

	return;
}
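/*
 * Sketch of the deferral seen above (illustrative, not kernel API): while
 * the list is mid-run, a caller cannot reschedule it directly, so it only
 * records the strictest mode and earliest offset it needs; the running pass
 * folds these preferences in when it picks its next deadline.
 */
#if 0
#include <stdint.h>

struct sketch_list {
	int running;
	uint16_t pref_mode;    /* OR of modes requested while running */
	uint32_t pref_offset;  /* earliest offset requested while running */
};

static int
sketch_request(struct sketch_list *l, uint16_t mode, uint32_t offset)
{
	if (!l->running) {
		return 0;  /* caller may schedule the list itself */
	}
	l->pref_mode |= mode;
	if (l->pref_offset == 0 || offset < l->pref_offset) {
		l->pref_offset = offset;
	}
	return 1;  /* deferred to the running pass */
}
#endif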
static inline void
tcp_set_lotimer_index(struct tcpcb *tp)
{
	uint16_t i, lo_index = TCPT_NONE, mode = 0;
	uint32_t lo_timer = 0;
	for (i = 0; i < TCPT_NTIMERS; ++i) {
		if (tp->t_timer[i] != 0) {
			TCP_SET_TIMER_MODE(mode, i);
			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
				lo_timer = tp->t_timer[i];
				lo_index = i;
			}
		}
	}
	tp->tentry.index = lo_index;
	tp->tentry.mode = mode;
	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);

	if (tp->tentry.index != TCPT_NONE) {
		tp->tentry.runtime = tp->tentry.timer_start
		    + tp->t_timer[tp->tentry.index];
		if (tp->tentry.runtime == 0) {
			tp->tentry.runtime++;
		}
	}
}
void
tcp_check_timer_state(struct tcpcb *tp)
{
	socket_lock_assert_owned(tp->t_inpcb->inp_socket);

	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
		return;
	}

	tcp_set_lotimer_index(tp);

	tcp_sched_timers(tp);
	return;
}
static inline void
tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
{
	/* handle wrap around */
	int32_t diff = (int32_t) (cur - *prev);
	if (diff > 0) {
		*dest = diff;
	} else {
		*dest = 0;
	}
	*prev = cur;
	return;
}
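/*
 * Worked example of the wraparound cast above: with *prev = 0xfffffffe just
 * before a u_int32_t counter wrapped and cur = 3 just after, the unsigned
 * subtraction 3 - 0xfffffffe is 5 modulo 2^32, and the int32_t cast keeps
 * it 5, so the true delta survives the wrap. A plain standalone check:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
sketch_wrap_delta(void)
{
	uint32_t prev = 0xfffffffeU;  /* counter just before wrapping */
	uint32_t cur = 3;             /* counter after wrapping past 0 */
	int32_t diff = (int32_t)(cur - prev);
	assert(diff == 5);  /* modular arithmetic recovers the delta */
}
#endif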
static inline void
tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
{
	/* handle wrap around */
	int64_t diff = (int64_t) (cur - *prev);
	if (diff > 0) {
		*dest = diff;
	} else {
		*dest = 0;
	}
	*prev = cur;
	return;
}
__private_extern__ void
tcp_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct sockaddr_in dst;
	struct sockaddr_in6 dst6;
	struct rtentry *rt = NULL;
	static struct tcp_last_report_stats prev;
	u_int64_t var, uptime;

#define stat    data.u.tcp_stats
	if (((uptime = net_uptime()) - tcp_last_report_time) <
	    tcp_report_stats_interval) {
		return;
	}

	tcp_last_report_time = uptime;

	bzero(&data, sizeof(data));
	data.flags = NSTAT_SYSINFO_TCP_STATS;

	bzero(&dst, sizeof(dst));
	dst.sin_len = sizeof(dst);
	dst.sin_family = AF_INET;

	/* ipv4 avg rtt */
	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
	    rt_tables[AF_INET], IFSCOPE_NONE);
	lck_mtx_unlock(rnh_lock);
	if (rt != NULL) {
		RT_LOCK(rt);
		if (rt_primary_default(rt, rt_key(rt)) &&
		    rt->rt_stats != NULL) {
			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
		}
		RT_UNLOCK(rt);
		rtfree(rt);
		rt = NULL;
	}

	/* ipv6 avg rtt */
	bzero(&dst6, sizeof(dst6));
	dst6.sin6_len = sizeof(dst6);
	dst6.sin6_family = AF_INET6;

	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
	    rt_tables[AF_INET6], IFSCOPE_NONE);
	lck_mtx_unlock(rnh_lock);
	if (rt != NULL) {
		RT_LOCK(rt);
		if (rt_primary_default(rt, rt_key(rt)) &&
		    rt->rt_stats != NULL) {
			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
		}
		RT_UNLOCK(rt);
		rtfree(rt);
		rt = NULL;
	}

	/* send packet loss rate, shift by 10 for precision */
	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
		var = tcpstat.tcps_sndrexmitpack << 10;
		stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
	}

	/* recv packet loss rate, shift by 10 for precision */
	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
		var = tcpstat.tcps_recovered_pkts << 10;
		stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
	}

	/* RTO after tail loss, shift by 10 for precision */
	if (tcpstat.tcps_sndrexmitpack > 0
	    && tcpstat.tcps_tailloss_rto > 0) {
		var = tcpstat.tcps_tailloss_rto << 10;
		stat.send_tlrto_rate =
		    (var * 100) / tcpstat.tcps_sndrexmitpack;
	}

	/* packet reordering */
	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
		var = tcpstat.tcps_reordered_pkts << 10;
		stat.send_reorder_rate =
		    (var * 100) / tcpstat.tcps_sndpack;
	}

	if (tcp_ecn_outbound == 1) {
		stat.ecn_client_enabled = 1;
	}
	if (tcp_ecn_inbound == 1) {
		stat.ecn_server_enabled = 1;
	}
	tcp_cumulative_stat(tcpstat.tcps_connattempt,
	    &prev.tcps_connattempt, &stat.connection_attempts);
	tcp_cumulative_stat(tcpstat.tcps_accepts,
	    &prev.tcps_accepts, &stat.connection_accepts);
	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);

	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
	tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
	    &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);

	nstat_sysinfo_send_data(&data);

#undef stat
}
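/*
 * Worked example of the shift-by-10 fixed point used above: with
 * tcps_sndrexmitpack = 50 and tcps_sndpack = 10000, var = 50 << 10 = 51200
 * and (51200 * 100) / 10000 = 512, i.e. 0.5% once the consumer divides the
 * reported rate back down by 1024.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
sketch_plr(void)
{
	uint64_t rexmit = 50, sent = 10000;
	uint64_t var = rexmit << 10;        /* scale up for precision */
	uint64_t plr = (var * 100) / sent;  /* percent, scaled by 1024 */
	assert(plr == 512);                 /* 512 / 1024 = 0.5% */
}
#endif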
void
tcp_interface_send_probe(u_int16_t probe_if_index)
{
	int32_t offset = 0;
	struct tcptimerlist *listp = &tcp_timer_list;

	/* Make sure TCP clock is up to date */
	calculate_tcp_clock();

	lck_mtx_lock(listp->mtx);
	if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
		tcpstat.tcps_probe_if_conflict++;
		os_log(OS_LOG_DEFAULT,
		    "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
		    __func__, probe_if_index, listp->probe_if_index,
		    tcpstat.tcps_probe_if_conflict);
		goto done;
	}

	listp->probe_if_index = probe_if_index;
	if (listp->running) {
		os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
		    __func__, probe_if_index);
		goto done;
	}

	/*
	 * Reschedule the timerlist to run within the next 10ms, which is
	 * the fastest that we can do.
	 */
	offset = TCP_TIMER_10MS_QUANTUM;
	if (listp->scheduled) {
		int32_t diff;
		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
		if (diff <= 0) {
			/* The timer will fire sooner than what's needed */
			os_log(OS_LOG_DEFAULT,
			    "%s: timer will fire sooner than needed for if_index %u\n",
			    __func__, probe_if_index);
			goto done;
		}
	}
	listp->mode = TCP_TIMERLIST_10MS_MODE;
	listp->idleruns = 0;

	tcp_sched_timerlist(offset);

done:
	lck_mtx_unlock(listp->mtx);
}
/*
 * Enable read probes on this connection, if:
 * - it is in established state
 * - doesn't have any data outstanding
 * - the outgoing ifp matches
 * - we have not already sent any read probes
 */
static void
tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
{
	if (tp->t_state == TCPS_ESTABLISHED &&
	    tp->snd_max == tp->snd_una &&
	    tp->t_inpcb->inp_last_outifp == ifp &&
	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
	    tp->t_rtimo_probes == 0) {
		tp->t_flagsext |= TF_DETECT_READSTALL;
		tp->t_rtimo_probes = 0;
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		    TCP_TIMER_10MS_QUANTUM);
		if (tp->tentry.index == TCPT_NONE) {
			tp->tentry.index = TCPT_KEEP;
			tp->tentry.runtime = tcp_now +
			    TCP_TIMER_10MS_QUANTUM;
		} else {
			int32_t diff = 0;

			/* Reset runtime to be in next 10ms */
			diff = timer_diff(tp->tentry.runtime, 0,
			    tcp_now, TCP_TIMER_10MS_QUANTUM);
			if (diff > 0) {
				tp->tentry.index = TCPT_KEEP;
				tp->tentry.runtime = tcp_now +
				    TCP_TIMER_10MS_QUANTUM;
				if (tp->tentry.runtime == 0) {
					tp->tentry.runtime++;
				}
			}
		}
	}
}
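/*
 * Sketch of the runtime pull-in above (illustrative): arming a read probe
 * only moves the entry's next run earlier. If the entry already fires within
 * the next quantum (diff <= 0), it is left alone; otherwise the runtime is
 * reset to now + quantum.
 */
#if 0
#include <stdint.h>

static uint32_t
sketch_pull_in(uint32_t runtime, uint32_t now, uint32_t quantum)
{
	int32_t diff = (int32_t)(runtime - (now + quantum));
	if (diff > 0) {
		runtime = now + quantum;  /* pull the deadline forward */
		if (runtime == 0) {
			runtime++;  /* keep 0 reserved as "unset" */
		}
	}
	return runtime;
}
#endif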
/*
 * Disable read probe and reset the keep alive timer
 */
static void
tcp_disable_read_probe(struct tcpcb *tp)
{
	if (tp->t_adaptive_rtimo == 0 &&
	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
	    tp->t_rtimo_probes > 0)) {
		tcp_keepalive_reset(tp);

		if (tp->t_mpsub) {
			mptcp_reset_keepalive(tp);
		}
	}
}
/*
 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
 * probes on connections going over a particular interface.
 */
void
tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
{
	int32_t offset;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp, *nxt;

	if (ifp == NULL) {
		return;
	}

	calculate_tcp_clock();

	/*
	 * Enable keep alive timer on all connections that are
	 * active/established on this interface.
	 */
	lck_rw_lock_shared(pcbinfo->ipi_lock);

	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
		struct tcpcb *tp = NULL;
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
		    WNT_STOPUSING) {
			continue;
		}

		/* Acquire lock to look at the state of the connection */
		socket_lock(inp->inp_socket, 1);

		/* Release the want count */
		if (inp->inp_ppcb == NULL ||
		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
			socket_unlock(inp->inp_socket, 1);
			continue;
		}
		tp = intotcpcb(inp);
		if (enable) {
			tcp_enable_read_probe(tp, ifp);
		} else {
			tcp_disable_read_probe(tp);
		}

		socket_unlock(inp->inp_socket, 1);
	}
	lck_rw_done(pcbinfo->ipi_lock);

	lck_mtx_lock(listp->mtx);
	if (listp->running) {
		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
		goto done;
	}

	/* Reschedule within the next 10ms */
	offset = TCP_TIMER_10MS_QUANTUM;
	if (listp->scheduled) {
		int32_t diff;
		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
		if (diff <= 0) {
			/* The timer will fire sooner than what's needed */
			goto done;
		}
	}
	listp->mode = TCP_TIMERLIST_10MS_MODE;
	listp->idleruns = 0;

	tcp_sched_timerlist(offset);
done:
	lck_mtx_unlock(listp->mtx);
	return;
}
inline void
tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
{
	struct if_cellular_status_v1 *ifsr;
	u_int32_t optlen;
	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
		optlen = tp->t_maxopd - tp->t_maxseg;

		if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
		    tp->t_cached_maxopd > 0 &&
		    tp->t_maxopd < tp->t_cached_maxopd) {
			tp->t_maxopd = tp->t_cached_maxopd;
			tcpstat.tcps_mss_to_default++;
		} else if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
		    tp->t_maxopd > tcp_mss_rec_medium) {
			tp->t_cached_maxopd = tp->t_maxopd;
			tp->t_maxopd = tcp_mss_rec_medium;
			tcpstat.tcps_mss_to_medium++;
		} else if (ifsr->mss_recommended ==
		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
		    tp->t_maxopd > tcp_mss_rec_low) {
			tp->t_cached_maxopd = tp->t_maxopd;
			tp->t_maxopd = tcp_mss_rec_low;
			tcpstat.tcps_mss_to_low++;
		}
		tp->t_maxseg = tp->t_maxopd - optlen;

		/*
		 * clear the cached value if it is same as the current
		 * value
		 */
		if (tp->t_maxopd == tp->t_cached_maxopd) {
			tp->t_cached_maxopd = 0;
		}
	}
}
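/*
 * Worked example of the option-length preservation above: suppose
 * t_maxopd = 1460 and t_maxseg = 1448, so optlen = 12 (e.g. timestamps).
 * If the cell recommendation lowers t_maxopd to an assumed
 * tcp_mss_rec_medium of 1280, the new t_maxseg becomes 1280 - 12 = 1268,
 * keeping the same room for TCP options.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
sketch_mss_clamp(void)
{
	uint32_t maxopd = 1460, maxseg = 1448;
	uint32_t optlen = maxopd - maxseg;  /* bytes of TCP options: 12 */
	uint32_t recommended = 1280;        /* assumed medium recommendation */
	maxopd = recommended;
	maxseg = maxopd - optlen;
	assert(maxseg == 1268);
}
#endif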
void
tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);

	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
		return;
	}

	if (!IFNET_IS_CELLULAR(ifp)) {
		/*
		 * This optimization is implemented for cellular
		 * networks only
		 */
		return;
	}
	if (tp->t_state <= TCPS_CLOSE_WAIT) {
		/*
		 * If the connection is currently doing or has done PMTU
		 * blackhole detection, do not change the MSS
		 */
		if (tp->t_flags & TF_BLACKHOLE) {
			return;
		}
		if (ifp->if_link_status == NULL) {
			return;
		}
		tcp_update_mss_core(tp, ifp);
	}
}
void
tcp_itimer(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;

	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
		if (tcp_itimer_done == TRUE) {
			tcp_itimer_done = FALSE;
			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
			return;
		}
		/* Upgrade failed, lost lock now take it again exclusive */
		lck_rw_lock_exclusive(ipi->ipi_lock);
	}
	tcp_itimer_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		struct socket *so;
		struct ifnet *ifp;

		if (inp->inp_ppcb == NULL ||
		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
			continue;
		}
		so = inp->inp_socket;
		ifp = inp->inp_last_outifp;
		socket_lock(so, 1);
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			socket_unlock(so, 1);
			continue;
		}
		so_check_extended_bk_idle_time(so);
		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
			tcp_update_mss_locked(so, NULL);
		}
		socket_unlock(so, 1);

		/*
		 * Defunct all system-initiated background sockets if the
		 * socket is using the cellular interface and the interface
		 * has its LQM set to abort.
		 */
		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
		    (ifp->if_interface_state.valid_bitmask &
		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
		    ifp->if_interface_state.lqm_state ==
		    IFNET_LQM_THRESH_ABORT) {
			socket_defunct(current_proc(), so,
			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
		}
	}

	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
	lck_rw_done(ipi->ipi_lock);
}