+
+/* Remove a timer entry from the timer list */
+void
+tcp_remove_timer(struct tcpcb *tp)
+{
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
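+ /* Lockless fast path: if the entry is not on the list, there is
+ * nothing to remove. The flag is checked again below with the list
+ * lock held, because another thread may change it in the meantime.
+ */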
+ if (!(TIMER_IS_ON_LIST(tp))) {
+ return;
+ }
+ lck_mtx_lock(listp->mtx);
+
+ /* Check if pcb is on timer list again after acquiring the lock */
+ if (!(TIMER_IS_ON_LIST(tp))) {
+ lck_mtx_unlock(listp->mtx);
+ return;
+ }
+
+ if (listp->next_te != NULL && listp->next_te == &tp->tentry)
+ listp->next_te = LIST_NEXT(&tp->tentry, le);
+
+ LIST_REMOVE(&tp->tentry, le);
+ tp->t_flags &= ~(TF_TIMER_ONLIST);
+
+ listp->entries--;
+ lck_mtx_unlock(listp->mtx);
+
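+ /* The entry is off the list at this point, so the link pointers can
+ * be cleared without holding the list lock.
+ */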
+ tp->tentry.le.le_next = NULL;
+ tp->tentry.le.le_prev = NULL;
+}
+
+/* Check whether the timer list needs to be rescheduled to run this
+ * timer entry on time. The point of the check is to avoid taking the
+ * list lock when it is not necessary.
+ */
+
+static boolean_t
+need_to_resched_timerlist(uint32_t runtime, uint16_t index)
+{
+ struct tcptimerlist *listp = &tcp_timer_list;
+ int32_t diff;
+ boolean_t is_fast;
+
+ if (runtime == 0 || index == TCPT_NONE)
+ return FALSE;
+ is_fast = !(IS_TIMER_SLOW(index));
+
+ /* If the list is being processed then the state of the list is in flux.
+ * In this case always acquire the lock and set the state correctly.
+ */
+ if (listp->running) {
+ return TRUE;
+ }
+
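+ /* If the list is already scheduled to run within one quantum of this
+ * timer's deadline, the pending run is close enough and there is no
+ * need to reschedule.
+ */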
+ diff = timer_diff(listp->runtime, 0, runtime, 0);
+ if (diff <= 0) {
+ /* The list is going to run before this timer */
+ return FALSE;
+ } else {
+ if (is_fast) {
+ if (diff <= listp->fast_quantum)
+ return FALSE;
+ } else {
+ if (diff <= listp->slow_quantum)
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+void
+tcp_sched_timerlist(uint32_t offset)
+{
+ uint64_t deadline = 0;
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED);
+
+ listp->runtime = tcp_now + offset;
+
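+ /* The offset is in TCP_RETRANSHZ ticks; convert one tick to
+ * nanoseconds for clock_interval_to_deadline().
+ */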
+ clock_interval_to_deadline(offset, NSEC_PER_SEC / TCP_RETRANSHZ,
+ &deadline);
+
+ thread_call_enter_delayed(listp->call, deadline);
+}
+
+/* Function to run the timers for a connection.
+ *
+ * Returns the offset of the next timer to be run for this connection,
+ * which can be used to reschedule the timer list.
+ */
+uint32_t
+tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index)
+{
+ struct socket *so;
+ uint16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
+ uint32_t timer_val, offset = 0, lo_timer = 0;
+ int32_t diff;
+ boolean_t needtorun[TCPT_NTIMERS];
+ int count = 0;
+
+ VERIFY(tp != NULL);
+ bzero(needtorun, sizeof(needtorun));
+
+ tcp_lock(tp->t_inpcb->inp_socket, 1, 0);
+
+ so = tp->t_inpcb->inp_socket;
+ /* Release the want count on inp */
+ if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ if (TIMER_IS_ON_LIST(tp)) {
+ tcp_remove_timer(tp);
+ }
+
+ /* Looks like the TCP connection got closed while we
+ * were waiting for the lock. Done.
+ */
+ goto done;
+ }
+
+ /* Since the timer thread needs to wait for the TCP lock, it may race
+ * with another thread that can cancel or reschedule the timer that is
+ * about to run. Check if we need to run anything.
+ */
+ index = tp->tentry.index;
+ if (index == TCPT_NONE || tp->tentry.runtime == 0)
+ goto done;
+
+ /* Read the timer value only after the index check above; indexing
+ * t_timer[] with TCPT_NONE would read past the end of the array.
+ */
+ timer_val = tp->t_timer[index];
+
+ diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
+ if (diff > 0) {
+ if (tp->tentry.index != TCPT_NONE) {
+ offset = diff;
+ *(next_index) = tp->tentry.index;
+ }
+ goto done;
+ }
+
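+ /* The scheduled timer has fired; clear it and run its handler.
+ * tcp_timers() may return NULL if it drops the connection.
+ */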
+ tp->t_timer[index] = 0;
+ if (timer_val > 0) {
+ tp = tcp_timers(tp, index);
+ if (tp == NULL)
+ goto done;
+ }
+
+ /* Check if there are any other timers that need to be run. While
+ * doing so, adjust the timer values relative to tcp_now.
+ */
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (tp->t_timer[i] != 0) {
+ diff = timer_diff(tp->tentry.timer_start, tp->t_timer[i], tcp_now, 0);
+ if (diff <= 0) {
+ tp->t_timer[i] = 0;
+ needtorun[i] = TRUE;
+ count++;
+ } else {
+ tp->t_timer[i] = diff;
+ needtorun[i] = FALSE;
+ if (lo_timer == 0 || diff < lo_timer) {
+ lo_timer = diff;
+ lo_index = i;
+ }
+ }
+ }
+ }
+
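+ /* Reset the reference point for the remaining timers and arm the
+ * entry with the earliest deadline found above.
+ */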
+ tp->tentry.timer_start = tcp_now;
+ tp->tentry.index = lo_index;
+ if (lo_index != TCPT_NONE) {
+ tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
+ } else {
+ tp->tentry.runtime = 0;
+ }
+
+ if (count > 0) {
+ /* run any other timers that are also outstanding at this time. */
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (needtorun[i]) {
+ tp->t_timer[i] = 0;
+ tp = tcp_timers(tp, i);
+ if (tp == NULL)
+ goto done;
+ }
+ }
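+ /* Running the expired timers above may have armed new ones;
+ * recompute the earliest deadline.
+ */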
+ tcp_set_lotimer_index(tp);
+ }
+
+ if (tp->tentry.index < TCPT_NONE) {
+ offset = tp->t_timer[tp->tentry.index];
+ *(next_index) = tp->tentry.index;
+ }
+
+done:
+ if (tp != NULL && tp->tentry.index == TCPT_NONE) {
+ tcp_remove_timer(tp);
+ }
+ tcp_unlock(so, 1, 0);
+ return offset;
+}
+
+void
+tcp_run_timerlist(void *arg1, void *arg2)
+{
+#pragma unused(arg1, arg2)
+
+ struct tcptimerentry *te, *next_te;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ struct tcpcb *tp;
+ uint32_t next_timer = 0;
+ uint16_t index = TCPT_NONE;
+ boolean_t need_fast = FALSE;
+ uint32_t active_count = 0;
+ uint32_t mode = TCP_TIMERLIST_FASTMODE;
+
+ calculate_tcp_clock();
+
+ lck_mtx_lock(listp->mtx);
+
+ listp->running = TRUE;
+
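+ /* While 'running' is set, tcp_sched_timers() will not reschedule the
+ * list directly; it records its request in pref_mode or pref_offset,
+ * which are consumed below before the flag is cleared.
+ */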
+ LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
+ uint32_t offset = 0;
+ uint32_t runtime = te->runtime;
+ if (TSTMP_GT(runtime, tcp_now)) {
+ offset = timer_diff(runtime, 0, tcp_now, 0);
+ if (next_timer == 0 || offset < next_timer) {
+ next_timer = offset;
+ }
+ continue;
+ }
+ active_count++;
+
+ tp = TIMERENTRY_TO_TP(te);
+
+ /* Acquire an inp wantcnt on the inpcb so that the socket won't get
+ * detached even if tcp_close is called
+ */
+ if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
+ /* Somehow this pcb went into dead state while on the timer list;
+ * just take it off the list. Since the timer list entry pointers
+ * are protected by the timer list lock, we can do it here.
+ */
+ if (TIMER_IS_ON_LIST(tp)) {
+ tp->t_flags &= ~(TF_TIMER_ONLIST);
+ LIST_REMOVE(&tp->tentry, le);
+ listp->entries--;
+
+ tp->tentry.le.le_next = NULL;
+ tp->tentry.le.le_prev = NULL;
+ }
+ continue;
+ }
+
+ /* Store the next timerentry pointer before releasing the list lock.
+ * If that entry has to be removed when we release the lock, this
+ * pointer will be updated to the element after that.
+ */
+ listp->next_te = next_te;
+
+ VERIFY_NEXT_LINK(&tp->tentry, le);
+ VERIFY_PREV_LINK(&tp->tentry, le);
+
+ lck_mtx_unlock(listp->mtx);
+
+ index = TCPT_NONE;
+ offset = tcp_run_conn_timer(tp, &index);
+
+ lck_mtx_lock(listp->mtx);
+
+ next_te = listp->next_te;
+ listp->next_te = NULL;
+
+ if (offset > 0) {
+ if (index < TCPT_NONE) {
+ /* Check if this is a fast_timer. */
+ if (!need_fast && !(IS_TIMER_SLOW(index))) {
+ need_fast = TRUE;
+ }
+
+ if (next_timer == 0 || offset < next_timer) {
+ next_timer = offset;
+ }
+ }
+ }
+ }
+
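+ /* Pick the mode for the next run: stay in fast mode while fast
+ * timers or active connections are seen, and fall back to slow mode
+ * after timer_fastmode_idlemax consecutive idle runs.
+ */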
+ if (!LIST_EMPTY(&listp->lhead)) {
+ if (listp->mode == TCP_TIMERLIST_FASTMODE) {
+ if (need_fast || active_count > 0 ||
+ listp->pref_mode == TCP_TIMERLIST_FASTMODE) {
+ listp->idlegen = 0;
+ } else {
+ listp->idlegen++;
+ if (listp->idlegen > timer_fastmode_idlemax) {
+ mode = TCP_TIMERLIST_SLOWMODE;
+ listp->idlegen = 0;
+ }
+ }
+ } else {
+ if (!need_fast) {
+ mode = TCP_TIMERLIST_SLOWMODE;
+ }
+ }
+
+ if (mode == TCP_TIMERLIST_FASTMODE ||
+ listp->pref_mode == TCP_TIMERLIST_FASTMODE) {
+ next_timer = listp->fast_quantum;
+ } else {
+ if (listp->pref_offset != 0 &&
+ listp->pref_offset < next_timer)
+ next_timer = listp->pref_offset;
+ if (next_timer < listp->slow_quantum)
+ next_timer = listp->slow_quantum;
+ }
+
+ listp->mode = mode;
+
+ tcp_sched_timerlist(next_timer);
+ } else {
+ /* No need to reschedule this timer */
+ listp->runtime = 0;
+ }
+
+ listp->running = FALSE;
+ listp->pref_mode = 0;
+ listp->pref_offset = 0;
+
+ lck_mtx_unlock(listp->mtx);
+}
+
+/* Check whether a connection's timer entry needs to be added to the
+ * timer list, or the list rescheduled to honor a changed deadline.
+ */
+void
+tcp_sched_timers(struct tcpcb *tp)
+{
+ struct tcptimerentry *te = &tp->tentry;
+ uint16_t index = te->index;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ uint32_t offset = 0;
+ boolean_t is_fast;
+ int list_locked = 0;
+
+ if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
+ /* Just return without adding the dead pcb to the list */
+ if (TIMER_IS_ON_LIST(tp)) {
+ tcp_remove_timer(tp);
+ }
+ return;
+ }
+
+ if (index == TCPT_NONE) {
+ tcp_remove_timer(tp);
+ return;
+ }
+
+ is_fast = !(IS_TIMER_SLOW(index));
+ offset = te->runtime - tcp_now;
+ if ((int32_t)offset <= 0) {
+ /* The deadline is now or already past; run the timer as soon
+ * as possible rather than letting the offset underflow.
+ */
+ offset = 1;
+ tcp_timer_advanced++;
+ }
+ if (is_fast)
+ offset = listp->fast_quantum;
+
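+ /* Insert the entry if it is not already on the list; the list lock
+ * is held across the insertion and any rescheduling below.
+ */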
+ if (!TIMER_IS_ON_LIST(tp)) {
+ if (!list_locked) {
+ lck_mtx_lock(listp->mtx);
+ list_locked = 1;
+ }
+
+ LIST_INSERT_HEAD(&listp->lhead, te, le);
+ tp->t_flags |= TF_TIMER_ONLIST;
+
+ listp->entries++;
+ if (listp->entries > listp->maxentries)
+ listp->maxentries = listp->entries;
+
+ /* if the list is not scheduled, just schedule it */
+ if (listp->runtime == 0)
+ goto schedule;
+
+ }
+
+ /* timer entry is currently on the list */
+ if (need_to_resched_timerlist(te->runtime, index)) {
+ tcp_resched_timerlist++;
+
+ if (!list_locked) {
+ lck_mtx_lock(listp->mtx);
+ list_locked = 1;
+ }
+
+ VERIFY_NEXT_LINK(te, le);
+ VERIFY_PREV_LINK(te, le);
+
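+ /* The list is being processed; leave a hint in pref_mode or
+ * pref_offset for tcp_run_timerlist() to pick up when it finishes.
+ */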
+ if (listp->running) {
+ if (is_fast) {
+ listp->pref_mode = TCP_TIMERLIST_FASTMODE;
+ } else if (listp->pref_offset == 0 ||
+ ((int)offset) < listp->pref_offset) {
+ listp->pref_offset = offset;
+ }
+ } else {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The list is going to run before this timer */
+ goto done;
+ } else {
+ goto schedule;
+ }
+ }
+ }
+ goto done;
+
+schedule:
+ if (is_fast) {
+ listp->mode = TCP_TIMERLIST_FASTMODE;
+ listp->idlegen = 0;
+ }
+ tcp_sched_timerlist(offset);
+
+done:
+ if (list_locked)
+ lck_mtx_unlock(listp->mtx);
+
+ return;
+}
+
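+/* Recompute the index and run time of the earliest timer armed on
+ * this connection.
+ */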
+void
+tcp_set_lotimer_index(struct tcpcb *tp)
+{
+ uint16_t i, lo_index = TCPT_NONE;
+ uint32_t lo_timer = 0;
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (tp->t_timer[i] != 0 &&
+ (lo_timer == 0 || tp->t_timer[i] < lo_timer)) {
+ lo_timer = tp->t_timer[i];
+ lo_index = i;
+ }
+ }
+ tp->tentry.index = lo_index;
+ if (lo_index != TCPT_NONE) {
+ tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
+ } else {
+ tp->tentry.runtime = 0;
+ }
+}
+
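+/* Recompute the earliest timer for a connection and reschedule the
+ * timer list if needed. Called with the connection's lock held.
+ */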
+void
+tcp_check_timer_state(struct tcpcb *tp)
+{
+ lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+
+ tcp_set_lotimer_index(tp);
+
+ tcp_sched_timers(tp);
+ return;
+}