+int path_mtu_discovery = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &path_mtu_discovery, 1, "Enable Path MTU Discovery");
+
+int ss_fltsz = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ss_fltsz, 1, "Slow start flight size");
+
+int ss_fltsz_local = 8; /* starts with eight segments max */
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &ss_fltsz_local, 1, "Slow start flight size for local networks");
+
+int tcp_do_tso = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_do_tso, 0, "Enable TCP Segmentation Offload");
+
+int tcp_ecn_outbound = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, ecn_initiate_out, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_outbound,
+ 0, "Initiate ECN for outbound connections");
+
+int tcp_ecn_inbound = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, ecn_negotiate_in, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_ecn_inbound,
+ 0, "Allow ECN negotiation for inbound connections");
+
+int tcp_packet_chaining = 50;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, packetchain, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_packet_chaining,
+ 0, "Enable TCP output packet chaining");
+
+int tcp_output_unlocked = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, socket_unlocked_on_output, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_output_unlocked,
+ 0, "Unlock TCP when sending packets down to IP");
+
+int tcp_do_rfc3390 = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_do_rfc3390, 1, "Calculate initial slowstart cwnd depending on MSS");
+
+int tcp_min_iaj_win = MIN_IAJ_WIN;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, min_iaj_win, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_min_iaj_win, 1, "Minimum recv win based on inter-packet arrival jitter");
+
+int tcp_acc_iaj_react_limit = ACC_IAJ_REACT_LIMIT;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, acc_iaj_react_limit, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_acc_iaj_react_limit, 1, "Accumulated IAJ when receiver starts to react");
+
+uint32_t tcp_do_autosendbuf = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, doautosndbuf, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_do_autosendbuf, 1, "Enable send socket buffer auto-tuning");
+
+uint32_t tcp_autosndbuf_inc = 8 * 1024;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, autosndbufinc, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_autosndbuf_inc, 1, "Increment in send socket buffer size");
+
+uint32_t tcp_autosndbuf_max = 512 * 1024;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, autosndbufmax, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_autosndbuf_max, 1, "Maximum send socket buffer size");
+
+uint32_t tcp_prioritize_acks = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, ack_prioritize, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &tcp_prioritize_acks, 1, "Prioritize pure acks");
+
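+/* Counters for TCP output packet chaining */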
+static int32_t packchain_newlist = 0;
+static int32_t packchain_looped = 0;
+static int32_t packchain_sent = 0;
+
+/* temporary: for testing */
+#if IPSEC
+extern int ipsec_bypass;
+#endif
+
+extern int slowlink_wsize; /* window correction for slow links */
+#if IPFIREWALL
+extern int fw_enable; /* firewall check for packet chaining */
+extern int fw_bypass; /* firewall check: disable packet chaining if there are rules */
+#endif /* IPFIREWALL */
+
+extern vm_size_t so_cache_zone_element_size;
+#if RANDOM_IP_ID
+extern int ip_use_randomid;
+#endif /* RANDOM_IP_ID */
+extern u_int32_t dlil_filter_count;
+extern u_int32_t kipf_count;
+extern int tcp_recv_bg;
+extern int maxseg_unacked;
+
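+/* Forward declarations */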
+static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *, int,
+ struct mbuf *, int, int, int32_t, boolean_t);
+
+extern uint32_t get_base_rtt(struct tcpcb *tp);
+static struct mbuf* tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th);
+
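+/*
+ * Derive a 16-bit identifier for a socket by dividing its address by the
+ * socket cache zone element size.  Returns 0 if the element size is not
+ * yet known; a computed value of 0 is remapped to 0xffff so that 0 can
+ * be used to mean "no identifier".
+ */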
+static __inline__ u_int16_t
+get_socket_id(struct socket * s)
+{
+ u_int16_t val;
+
+ if (so_cache_zone_element_size == 0) {
+ return (0);
+ }
+ val = (u_int16_t)(((uintptr_t)s) / so_cache_zone_element_size);
+ if (val == 0) {
+ val = 0xffff;
+ }
+ return (val);
+}