+
+#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC)
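+/* For example, assuming the usual definitions TCP_RETRANSHZ == 1000
+ * (millisecond ticks) and TCP_RETRANSHZ_TO_USEC == 1000, a timeval of
+ * {1, 500000} converts to 1 * 1000 + 500000 / 1000 = 1500 ticks.
+ */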
+
+/* Calculate the tcp clock (tcp_now). The clock is updated at the
+ * boundaries of the tcp layer; this is done in three places:
+ * 1. Right before processing an input tcp packet
+ * 2. Whenever a connection wants to access the network using tcp_usrreqs
+ * 3. When a tcp timer fires, or before the tcp slow timeout
+ */
+
+void
+calculate_tcp_clock(void)
+{
+ struct timeval tv = tcp_uptime;
+ struct timeval interval = {0, TCP_RETRANSHZ_TO_USEC};
+ struct timeval now, hold_now;
+ uint32_t incr = 0;
+
+ microuptime(&now);
+
+ /*
+ * Update coarse-grained networking timestamp (in sec.); the idea
+ * is to update the counter returnable via net_uptime() when
+ * we read time.
+ */
+ net_update_uptime_secs(now.tv_sec);
+
+ timevaladd(&tv, &interval);
+ if (timevalcmp(&now, &tv, >)) {
+ /* time to update the clock */
+ lck_spin_lock(tcp_uptime_lock);
+ if (timevalcmp(&tcp_uptime, &now, >=)) {
+ /* clock got updated while we were waiting for the lock */
+ lck_spin_unlock(tcp_uptime_lock);
+ return;
+ }
+
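+		/* Re-read the uptime now that the lock is held; hold_now
+		 * keeps the fresh reading so tcp_uptime can be advanced to
+		 * it, while now is turned into the delta since the last
+		 * update.
+		 */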
+ microuptime(&now);
+ hold_now = now;
+ tv = tcp_uptime;
+ timevalsub(&now, &tv);
+
+ incr = TIMEVAL_TO_TCPHZ(now);
+ if (incr > 0) {
+ tcp_uptime = hold_now;
+ tcp_now += incr;
+ }
+
+ lck_spin_unlock(tcp_uptime_lock);
+ }
+ return;
+}
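+
+/* Illustrative only (not part of this change): a caller at one of the
+ * boundaries listed above would refresh the clock before consulting
+ * tcp_now, e.g.:
+ *
+ *	calculate_tcp_clock();
+ *	... use tcp_now for RTT and retransmit-timer bookkeeping ...
+ */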
+
+/* Compute the receive window scaling that we are going to request
+ * for this connection, based on sb_hiwat. Try to leave some
+ * room to potentially increase the window size up to a maximum
+ * defined by the constant tcp_autorcvbuf_max.
+ */
+void
+tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so)
+{
+ u_int32_t maxsockbufsize;
+
+ tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
+ maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
+ so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
+
+ while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize)
+ tp->request_r_scale++;
+ tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT);
+}
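+
+/* Worked example (illustrative): with TCP_MAXWIN == 65535 and an
+ * effective maxsockbufsize of 1 MB (1048576 bytes), the loop settles
+ * on a scale of 5: 65535 << 4 = 1048560 is still below 1048576, while
+ * 65535 << 5 = 2097120 covers it.
+ */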
+
+int
+tcp_notsent_lowat_check(struct socket *so)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = NULL;
+ int notsent = 0;
+	if (inp != NULL) {
+		tp = intotcpcb(inp);
+	}
+	if (tp == NULL) {
+		/* No tcpcb attached; nothing to report */
+		return(0);
+	}
+
+	notsent = so->so_snd.sb_cc -
+		(tp->snd_nxt - tp->snd_una);
+
+	/* When we send a FIN or SYN, notsent can be negative.
+	 * In that case we still need to send a write event to the
+	 * process if it is waiting. In the FIN case, the process
+	 * will get an error from send because cantsendmore is set.
+	 */
+ if (notsent <= tp->t_notsent_lowat) {
+ return(1);
+ }
+
+	/* When Nagle's algorithm is not disabled, it is better
+	 * to keep waking up the client until there is at least one
+	 * maxseg of data to write, so that full-sized segments can
+	 * be sent.
+	 */
+ if ((tp->t_flags & TF_NODELAY) == 0 &&
+ notsent > 0 && notsent < tp->t_maxseg) {
+ return(1);
+ }
+ return(0);
+}
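+
+/* Userspace view (illustrative; assumes the TCP_NOTSENT_LOWAT socket
+ * option is what sets t_notsent_lowat): an application picks a
+ * threshold in bytes and then waits for writability, which this
+ * check gates:
+ *
+ *	int lowat = 16 * 1024;
+ *	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
+ *	    &lowat, sizeof(lowat));
+ */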
+