/*
- * Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/cdefs.h>
#include <sys/param.h>
-#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/dlil.h>
+#include <net/flowadv.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
+#include <dev/random/randomdev.h>
/*
* Stochastic Fair Blue
#define PBOXTIME_MIN (30ULL * 1000 * 1000) /* 30ms */
#define PBOXTIME_MAX (300ULL * 1000 * 1000) /* 300ms */
+/*
+ * Target queueing delay is the amount of extra delay that can be added
+ * to accommodate variations in the link bandwidth. The queue should be
+ * large enough to induce this much delay and no more.
+ */
+#define TARGET_QDELAY_BASE (10ULL * 1000 * 1000) /* 10ms */
+#define TARGET_QDELAY_MIN (10ULL * 1000) /* 10us */
+#define TARGET_QDELAY_MAX (20ULL * 1000 * 1000 * 1000) /* 20s */
+
+/*
+ * Update interval for checking the extra delay added by the queue. This
+ * should be the 90th-95th percentile of the RTT experienced by any
+ * TCP connection, so that it can absorb bursts of traffic.
+ */
+#define UPDATE_INTERVAL_BASE (100ULL * 1000 * 1000) /* 100ms */
+#define UPDATE_INTERVAL_MIN (100ULL * 1000 * 1000) /* 100ms */
+#define UPDATE_INTERVAL_MAX (10ULL * 1000 * 1000 * 1000) /* 10s */
+
#define SFB_RANDOM(sp, tmin, tmax) ((sfb_random(sp) % (tmax)) + (tmin))
-#define SFB_PKT_PBOX PF_TAG_QUEUE1 /* in penalty box */
+#define SFB_PKT_PBOX 0x1 /* in penalty box */
/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define SFB_MAX_PMARK (1 << SFB_FP_SHIFT) /* Q14 representation of 1.00 */
} \
} while (0)
+/* Minimum number of bytes in queue to get flow controlled */
+#define SFB_MIN_FC_THRESHOLD_BYTES 7500
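+/* (7500 bytes = five full-size 1500-byte frames) */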
+
+#define SFB_SET_DELAY_HIGH(_sp_, _q_) do { \
+ (_sp_)->sfb_flags |= SFBF_DELAYHIGH; \
+ (_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \
+ (qsize((_q_)) >> 3)); \
+} while (0)
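+/*
+ * Example: with a 64KB queue, the flow-control threshold becomes
+ * max(7500, 65536 >> 3) = 8192 bytes.
+ */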
+
+#define SFB_QUEUE_DELAYBASED(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYBASED)
+#define SFB_IS_DELAYHIGH(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYHIGH)
+#define SFB_QUEUE_DELAYBASED_MAXSIZE 2048 /* max pkts */
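+/* Delay-based queues enforce this cap in place of qlimit; see sfb_addq() */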
+
#define HINTERVAL_MIN (10) /* 10 seconds */
#define HINTERVAL_MAX (20) /* 20 seconds */
#define SFB_HINTERVAL(sp) ((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN)
#define ABS(v) (((v) > 0) ? (v) : -(v))
-#define SFB_ZONE_MAX 32 /* maximum elements in zone */
-#define SFB_ZONE_NAME "classq_sfb" /* zone name */
+#define SFB_ZONE_MAX 32 /* maximum elements in zone */
+#define SFB_ZONE_NAME "classq_sfb" /* zone name */
+
+#define SFB_BINS_ZONE_MAX 32 /* maximum elements in zone */
+#define SFB_BINS_ZONE_NAME "classq_sfb_bins" /* zone name */
+
+#define SFB_FCL_ZONE_MAX 32 /* maximum elements in zone */
+#define SFB_FCL_ZONE_NAME "classq_sfb_fcl" /* zone name */
/* Place the flow control entries in current bin on level 0 */
#define SFB_FC_LEVEL 0
+/* Store SFB hash and flags in the module private scratch space */
+#define pkt_sfb_hash8 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val8
+#define pkt_sfb_hash16 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16
+#define pkt_sfb_hash32 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32
+#define pkt_sfb_flags pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32
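+/*
+ * The three hash views alias the same 32 bits of scratch space: with
+ * SFB_LEVELS at 2, pkt_sfb_hash16[s] holds the 16-bit flow hash for bin
+ * set s, and its two bytes, pkt_sfb_hash8[s * 2] and
+ * pkt_sfb_hash8[s * 2 + 1], index the level-0 and level-1 bins of that set.
+ */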
+
static unsigned int sfb_size; /* size of zone element */
static struct zone *sfb_zone; /* zone for sfb */
+static unsigned int sfb_bins_size; /* size of zone element */
+static struct zone *sfb_bins_zone; /* zone for sfb_bins */
+
+static unsigned int sfb_fcl_size; /* size of zone element */
+static struct zone *sfb_fcl_zone; /* zone for sfb_fc_lists */
+
/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static struct mbuf *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t,
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
+static void sfb_calc_target_qdelay(struct sfb *, u_int64_t);
+static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
-static inline int sfb_pcheck(struct sfb *, struct pf_mtag *);
-static int sfb_penalize(struct sfb *, struct pf_mtag *, struct timespec *);
+static inline int sfb_pcheck(struct sfb *, struct pkthdr *);
+static int sfb_penalize(struct sfb *, struct pkthdr *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
struct timespec *, struct timespec *);
-static inline void sfb_dq_update_bins(struct sfb *, struct pf_mtag *,
- struct timespec *);
-static inline void sfb_eq_update_bins(struct sfb *, struct pf_mtag *);
-static int sfb_drop_early(struct sfb *, struct pf_mtag *, u_int16_t *,
+static inline void sfb_dq_update_bins(struct sfb *, struct pkthdr *,
+ struct timespec *, u_int32_t qsize);
+static inline void sfb_eq_update_bins(struct sfb *, struct pkthdr *);
+static int sfb_drop_early(struct sfb *, struct pkthdr *, u_int16_t *,
struct timespec *);
-static boolean_t sfb_bin_addfcentry(struct sfb *, struct pf_mtag *);
-static void sfb_fclist_append(struct sfb *, struct sfb_fc_list *);
+static boolean_t sfb_bin_addfcentry(struct sfb *, struct pkthdr *);
+static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
+static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
+static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
+ struct timespec *);
SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "SFB");
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW|CTLFLAG_LOCKED,
&sfb_hinterval, "SFB hash interval in nanoseconds");
+static u_int64_t sfb_target_qdelay;
+SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
+ &sfb_target_qdelay, "SFB target queue delay in nanoseconds");
+
+static u_int64_t sfb_update_interval;
+SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, update_interval,
+ CTLFLAG_RW|CTLFLAG_LOCKED, &sfb_update_interval,
+ "SFB update interval in nanoseconds");
+
static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW|CTLFLAG_LOCKED,
&sfb_increment, SFB_INCREMENT, "SFB increment [d1]");
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW|CTLFLAG_LOCKED,
&sfb_ratelimit, 0, "SFB rate limit");
-#define MBPS (1ULL * 1000 * 1000)
-#define GBPS (MBPS * 1000)
+#define KBPS (1ULL * 1000) /* 1 Kbits per second */
+#define MBPS (1ULL * 1000 * 1000) /* 1 Mbits per second */
+#define GBPS (MBPS * 1000) /* 1 Gbits per second */
struct sfb_time_tbl {
u_int64_t speed; /* uplink speed */
}
zone_change(sfb_zone, Z_EXPAND, TRUE);
zone_change(sfb_zone, Z_CALLERACCT, TRUE);
+
+ sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
+ sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
+ 0, SFB_BINS_ZONE_NAME);
+ if (sfb_bins_zone == NULL) {
+ panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
+ /* NOTREACHED */
+ }
+ zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
+ zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);
+
+ sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
+ sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
+ 0, SFB_FCL_ZONE_NAME);
+ if (sfb_fcl_zone == NULL) {
+ panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
+ /* NOTREACHED */
+ }
+ zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
+ zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}
static u_int32_t
sfb_random(struct sfb *sp)
{
IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
- return (random());
+ return (RandomULong());
}
static void
net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}
+static void
+sfb_calc_target_qdelay(struct sfb *sp, u_int64_t out_bw)
+{
+#pragma unused(out_bw)
+ u_int64_t target_qdelay = 0;
+ struct ifnet *ifp = sp->sfb_ifp;
+
+ target_qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);
+
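+ /*
+ * A nonzero net.classq.sfb.target_qdelay sysctl overrides the
+ * interface-supplied value.
+ */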
+ if (sfb_target_qdelay != 0)
+ target_qdelay = sfb_target_qdelay;
+
+ /*
+ * If we do not know the effective bandwidth, use the default
+ * target queue delay.
+ */
+ if (target_qdelay == 0)
+ target_qdelay = IFQ_TARGET_DELAY;
+
+ sp->sfb_target_qdelay = target_qdelay;
+}
+
+static void
+sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
+{
+#pragma unused(out_bw)
+ u_int64_t update_interval = 0;
+
+ /* If the system-level override is set, use it */
+ if (sfb_update_interval != 0)
+ update_interval = sfb_update_interval;
+ /*
+ * If we do not know the effective bandwidth, use the default
+ * update interval.
+ */
+ if (update_interval == 0)
+ update_interval = IFQ_UPDATE_INTERVAL;
+
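+ /* the interval is expressed in nanoseconds; convert it to a timespec */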
+ net_nsectimer(&update_interval, &sp->sfb_update_interval);
+}
+
/*
* sfb support routines
*/
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
struct sfb *sp;
+ int i;
VERIFY(ifp != NULL && qlim > 0);
log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
return (NULL);
}
-
bzero(sp, sfb_size);
- if ((sp->sfb_bins = _MALLOC(sizeof (*sp->sfb_bins), M_DEVBUF,
- M_WAITOK|M_ZERO)) == NULL) {
+
+ if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
sfb_destroy(sp);
return (NULL);
}
+ bzero(sp->sfb_bins, sfb_bins_size);
- if ((sp->sfb_fc_lists = _MALLOC(sizeof (*sp->sfb_fc_lists), M_DEVBUF,
- M_WAITOK|M_ZERO)) == NULL) {
+ if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
if_name(ifp));
sfb_destroy(sp);
return (NULL);
}
+ bzero(sp->sfb_fc_lists, sfb_fcl_size);
+
+ for (i = 0; i < SFB_BINS; ++i)
+ STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);
- sp->sfb_flags = (flags & SFBF_USERFLAGS);
sp->sfb_ifp = ifp;
sp->sfb_qlim = qlim;
sp->sfb_qid = qid;
+ sp->sfb_flags = (flags & SFBF_USERFLAGS);
+#if !PF_ECN
+ if (sp->sfb_flags & SFBF_ECN) {
+ sp->sfb_flags &= ~SFBF_ECN;
+ log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
+ "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);
+ }
+#endif /* !PF_ECN */
sfb_resetq(sp, -1);
}
static void
-sfb_fclist_append(struct sfb *sp, struct sfb_fc_list *fcl)
+sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
{
IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
- ifnet_fclist_append(sp, fcl);
+
+ VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
+ sp->sfb_stats.flow_feedback += fcl->cnt;
+ fcl->cnt = 0;
+
+ flowadv_add(&fcl->fclist);
+ VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
}
static void
{
int i;
- /* Move all the flow control entries to the ifnet list */
+ /* Move all the flow control entries to the flowadv list */
for (i = 0; i < SFB_BINS; ++i) {
- struct sfb_fc_list *fcl = SFB_FC_LIST(sp, i);
- if (!SLIST_EMPTY(fcl))
+ struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
+ if (!STAILQ_EMPTY(&fcl->fclist))
sfb_fclist_append(sp, fcl);
}
}
{
sfb_fclists_clean(sp);
if (sp->sfb_bins != NULL) {
- _FREE(sp->sfb_bins, M_DEVBUF);
+ zfree(sfb_bins_zone, sp->sfb_bins);
sp->sfb_bins = NULL;
}
if (sp->sfb_fc_lists != NULL) {
- _FREE(sp->sfb_fc_lists, M_DEVBUF);
+ zfree(sfb_fcl_zone, sp->sfb_fc_lists);
sp->sfb_fc_lists = NULL;
}
zfree(sfb_zone, sp);
sfb_calc_holdtime(sp, eff_rate);
sfb_calc_pboxtime(sp, eff_rate);
sfb_calc_hinterval(sp, NULL);
+ sfb_calc_target_qdelay(sp, eff_rate);
+ sfb_calc_update_interval(sp, eff_rate);
if (ev == CLASSQ_EV_LINK_DOWN ||
ev == CLASSQ_EV_LINK_UP)
log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
"pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
- "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps\n",
+ "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps"
+ "target_qdelay= %llu nsec "
+ "update_interval=%llu sec %llu nsec flags=0x%x\n",
if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
(u_int64_t)sp->sfb_pboxtime.tv_nsec,
(u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
(int)sp->sfb_hinterval.tv_sec, (int)sizeof (*sp->sfb_bins),
- eff_rate);
+ eff_rate, (u_int64_t)sp->sfb_target_qdelay,
+ (u_int64_t)sp->sfb_update_interval.tv_sec,
+ (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
}
void
sps->dropthresh = sp->sfb_drop_thresh;
sps->clearpkts = sp->sfb_clearpkts;
sps->current = sp->sfb_current;
+ sps->target_qdelay = sp->sfb_target_qdelay;
+ sps->min_estdelay = sp->sfb_min_qdelay;
+ sps->delay_fcthreshold = sp->sfb_fc_threshold;
+ sps->flags = sp->sfb_flags;
net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
+ net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
*(&(sps->sfbstats)) = *(&(sp->sfb_stats));
_CASSERT(sizeof ((*sp->sfb_bins)[0].stats) ==
/* clear/adjust bin statistics and flow control lists */
for (i = 0; i < SFB_BINS; i++) {
- struct sfb_fc_list *fcl = SFB_FC_LIST(sp, i);
+ struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
- if (!SLIST_EMPTY(fcl))
+ if (!STAILQ_EMPTY(&fcl->fclist))
sfb_fclist_append(sp, fcl);
for (j = 0; j < SFB_LEVELS; j++) {
wbin = SFB_BINST(sp, j, i, s ^ 1); /* warm-up */
cbin->pkts = 0;
+ cbin->bytes = 0;
if (cbin->pmark > SFB_MAX_PMARK)
cbin->pmark = SFB_MAX_PMARK;
if (cbin->pmark < 0)
}
static inline int
-sfb_pcheck(struct sfb *sp, struct pf_mtag *t)
+sfb_pcheck(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
int i, n;
* Level 0: bin index at [0] for set 0; [2] for set 1
* Level 1: bin index at [1] for set 0; [3] for set 1
*/
- if (SFB_BINST(sp, 0, SFB_BINMASK(t->pftag_qpriv8[(s << 1)]),
+ if (SFB_BINST(sp, 0, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]),
s)->pmark < SFB_PMARK_TH ||
- SFB_BINST(sp, 1, SFB_BINMASK(t->pftag_qpriv8[(s << 1) + 1]),
+ SFB_BINST(sp, 1, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]),
s)->pmark < SFB_PMARK_TH)
return (0);
#else /* SFB_LEVELS != 2 */
for (i = 0; i < SFB_LEVELS; i++) {
if (s == 0) /* set 0, bin index [0,1] */
- n = SFB_BINMASK(t->pftag_qpriv8[i]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
else /* set 1, bin index [2,3] */
- n = SFB_BINMASK(t->pftag_qpriv8[i + 2]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);
if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH)
return (0);
}
static int
-sfb_penalize(struct sfb *sp, struct pf_mtag *t, struct timespec *now)
+sfb_penalize(struct sfb *sp, struct pkthdr *pkt, struct timespec *now)
{
struct timespec delta = { 0, 0 };
/* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
- if (!sfb_ratelimit || !sfb_pcheck(sp, t))
+ if (!sfb_ratelimit || !sfb_pcheck(sp, pkt))
return (0);
net_timersub(now, &sp->sfb_pboxfreeze, &delta);
*/
#if SFB_LEVELS == 2
/* Level 0: bin index at [0] for set 0; [2] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(w << 1)]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1)]);
bin = SFB_BINST(sp, 0, n, w);
if (bin->pkts >= sp->sfb_allocation)
sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);
/* Level 1: bin index at [1] for set 0; [3] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(w << 1) + 1]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1) + 1]);
bin = SFB_BINST(sp, 1, n, w);
if (bin->pkts >= sp->sfb_allocation)
sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
#else /* SFB_LEVELS != 2 */
for (i = 0; i < SFB_LEVELS; i++) {
if (w == 0) /* set 0, bin index [0,1] */
- n = SFB_BINMASK(t->pftag_qpriv8[i]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
else /* set 1, bin index [2,3] */
- n = SFB_BINMASK(t->pftag_qpriv8[i + 2]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);
bin = SFB_BINST(sp, i, n, w);
if (bin->pkts >= sp->sfb_allocation) {
}
/* non-conformant or else misclassified flow; queue it anyway */
- t->pftag_flags |= SFB_PKT_PBOX;
+ pkt->pkt_sfb_flags |= SFB_PKT_PBOX;
*(&sp->sfb_pboxfreeze) = *now;
return (0);
}
static inline void
-sfb_dq_update_bins(struct sfb *sp, struct pf_mtag *t, struct timespec *now)
+sfb_dq_update_bins(struct sfb *sp, struct pkthdr *pkt,
+ struct timespec *now, u_int32_t qsize)
{
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
int i;
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
struct sfbbinstats *bin;
int s, n;
- struct sfb_fc_list *fcl = NULL;
+ struct sfb_fcl *fcl = NULL;
s = sp->sfb_current;
VERIFY((s + (s ^ 1)) == 1);
*/
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
/* Level 0: bin index at [0] for set 0; [2] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(s << 1)]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
bin = SFB_BINST(sp, 0, n, s);
- VERIFY(bin->pkts > 0);
- if (--bin->pkts == 0) {
+ VERIFY(bin->pkts > 0 && bin->bytes >= (u_int32_t)pkt->len);
+ bin->pkts--;
+ bin->bytes -= pkt->len;
+
+ if (bin->pkts == 0)
sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
- }
- if (bin->pkts <= (sp->sfb_allocation >> 2)) {
- /* deliver flow control feedback to the sockets */
- fcl = SFB_FC_LIST(sp, n);
- if (!SLIST_EMPTY(fcl))
- sfb_fclist_append(sp, fcl);
+
+ /* Deliver flow control feedback to the sockets */
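+ /*
+ * For delay-based queueing, wake up flows once the delay-high state
+ * has cleared or this bin has drained below the flow-control
+ * threshold (or emptied); otherwise use the packet-count criterion
+ * of a quarter of the bin allocation.
+ */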
+ if (SFB_QUEUE_DELAYBASED(sp)) {
+ if (!(SFB_IS_DELAYHIGH(sp)) ||
+ bin->bytes <= sp->sfb_fc_threshold ||
+ bin->pkts == 0 || qsize == 0)
+ fcl = SFB_FC_LIST(sp, n);
+ } else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
+ fcl = SFB_FC_LIST(sp, n);
}
+ if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
+ sfb_fclist_append(sp, fcl);
+ fcl = NULL;
+
/* Level 1: bin index at [1] for set 0; [3] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(s << 1) + 1]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
bin = SFB_BINST(sp, 1, n, s);
- VERIFY(bin->pkts > 0);
- if (--bin->pkts == 0)
+ VERIFY(bin->pkts > 0 && bin->bytes >= (u_int32_t)pkt->len);
+ bin->pkts--;
+ bin->bytes -= pkt->len;
+ if (bin->pkts == 0)
sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
for (i = 0; i < SFB_LEVELS; i++) {
if (s == 0) /* set 0, bin index [0,1] */
- n = SFB_BINMASK(t->pftag_qpriv8[i]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
else /* set 1, bin index [2,3] */
- n = SFB_BINMASK(t->pftag_qpriv8[i + 2]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);
bin = SFB_BINST(sp, i, n, s);
- VERIFY(bin->pkts > 0);
- if (--bin->pkts == 0) {
+ VERIFY(bin->pkts > 0 && bin->bytes >= (u_int32_t)pkt->len);
+ bin->pkts--;
+ bin->bytes -= pkt->len;
+ if (bin->pkts == 0)
sfb_decrement_bin(sp, bin,
SFB_BINFT(sp, i, n, s), now);
- }
- if (bin->pkts <= (sp->sfb_allocation >> 2)) {
- /* deliver flow control feedback to the sockets */
- if (i == SFB_FC_LEVEL) {
+ if (i != SFB_FC_LEVEL)
+ continue;
+ if (SFB_QUEUE_DELAYBASED(sp)) {
+ if (!(SFB_IS_DELAYHIGH(sp)) ||
+ bin->bytes <= sp->sfb_fc_threshold)
fcl = SFB_FC_LIST(sp, n);
- if (!SLIST_EMPTY(fcl))
- sfb_fclist_append(sp, fcl);
- }
+ } else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
+ fcl = SFB_FC_LIST(sp, n);
}
+ if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
+ sfb_fclist_append(sp, fcl);
+ fcl = NULL;
}
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
}
static inline void
-sfb_eq_update_bins(struct sfb *sp, struct pf_mtag *t)
+sfb_eq_update_bins(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
int i, n;
#endif /* SFB_LEVELS != 2 */
int s;
-
+ struct sfbbinstats *bin;
+
s = sp->sfb_current;
VERIFY((s + (s ^ 1)) == 1);
*/
#if SFB_LEVELS == 2
/* Level 0: bin index at [0] for set 0; [2] for set 1 */
- SFB_BINST(sp, 0, SFB_BINMASK(t->pftag_qpriv8[(s << 1)]), s)->pkts++;
+ bin = SFB_BINST(sp, 0,
+ SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]), s);
+ bin->pkts++;
+ bin->bytes += pkt->len;
/* Level 1: bin index at [1] for set 0; [3] for set 1 */
- SFB_BINST(sp, 1, SFB_BINMASK(t->pftag_qpriv8[(s << 1) + 1]), s)->pkts++;
+ bin = SFB_BINST(sp, 1,
+ SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]), s);
+ bin->pkts++;
+ bin->bytes += pkt->len;
+
#else /* SFB_LEVELS != 2 */
for (i = 0; i < SFB_LEVELS; i++) {
if (s == 0) /* set 0, bin index [0,1] */
- n = SFB_BINMASK(t->pftag_qpriv8[i]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
else /* set 1, bin index [2,3] */
- n = SFB_BINMASK(t->pftag_qpriv8[i + 2]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);
- SFB_BINST(sp, i, n, s)->pkts++;
+ bin = SFB_BINST(sp, i, n, s);
+ bin->pkts++;
+ bin->bytes += pkt->len;
}
#endif /* SFB_LEVELS != 2 */
}
static boolean_t
-sfb_bin_addfcentry(struct sfb *sp, struct pf_mtag *t)
+sfb_bin_addfcentry(struct sfb *sp, struct pkthdr *pkt)
{
- struct sfb_bin_fcentry *fce;
- u_int32_t flowhash;
- struct sfb_fc_list *fcl;
+ struct flowadv_fcentry *fce;
+ u_int32_t flowsrc, flowid;
+ struct sfb_fcl *fcl;
int s;
s = sp->sfb_current;
VERIFY((s + (s ^ 1)) == 1);
- flowhash = t->pftag_flowhash;
+ flowsrc = pkt->pkt_flowsrc;
+ flowid = pkt->pkt_flowid;
- if (flowhash == 0) {
- sp->sfb_stats.null_flowhash++;
+ if (flowid == 0) {
+ sp->sfb_stats.null_flowid++;
return (FALSE);
}
* Use value at index 0 for set 0 and
* value at index 2 for set 1
*/
- fcl = SFB_FC_LIST(sp, SFB_BINMASK(t->pftag_qpriv8[(s << 1)]));
- SLIST_FOREACH(fce, fcl, fce_link) {
- if (fce->fce_flowhash == flowhash) {
+ fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]));
+ STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
+ if (fce->fce_flowsrc == flowsrc &&
+ fce->fce_flowid == flowid) {
/* Already on flow control list; just return */
return (TRUE);
}
}
IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
- fce = ifnet_fce_alloc(M_WAITOK);
+ fce = flowadv_alloc_entry(M_WAITOK);
if (fce != NULL) {
- fce->fce_flowhash = flowhash;
- SLIST_INSERT_HEAD(fcl, fce, fce_link);
+ fce->fce_flowsrc = flowsrc;
+ fce->fce_flowid = flowid;
+ STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
+ fcl->cnt++;
sp->sfb_stats.flow_controlled++;
}
return (fce != NULL);
}
+/*
+ * Check whether this flow needs to be flow-controlled or whether
+ * this packet needs to be dropped.
+ */
+static int
+sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
+{
+ int ret = 0;
+ if (SFB_QUEUE_DELAYBASED(sp)) {
+ /*
+ * Mark or drop if this bin holds at least twice the
+ * flow-control threshold worth of bytes.
+ */
+ if (SFB_IS_DELAYHIGH(sp) &&
+ bin->bytes >= (sp->sfb_fc_threshold << 1))
+ ret = 1;
+ } else {
+ if (bin->pkts >= sp->sfb_allocation &&
+ bin->pkts >= sp->sfb_drop_thresh)
+ ret = 1; /* drop or mark */
+ }
+ return (ret);
+}
+
/*
* early-drop probability is kept in pmark of each bin of the flow
*/
static int
-sfb_drop_early(struct sfb *sp, struct pf_mtag *t, u_int16_t *pmin,
+sfb_drop_early(struct sfb *sp, struct pkthdr *pkt, u_int16_t *pmin,
struct timespec *now)
{
#if SFB_LEVELS != 2
*/
#if SFB_LEVELS == 2
/* Level 0: bin index at [0] for set 0; [2] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(s << 1)]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
bin = SFB_BINST(sp, 0, n, s);
if (*pmin > (u_int16_t)bin->pmark)
*pmin = (u_int16_t)bin->pmark;
- if (bin->pkts >= sp->sfb_allocation) {
- if (bin->pkts >= sp->sfb_drop_thresh)
- ret = 1; /* drop or mark */
+
+ /* Update SFB probability */
+ if (bin->pkts >= sp->sfb_allocation)
sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
- }
+
+ ret = sfb_bin_mark_or_drop(sp, bin);
/* Level 1: bin index at [1] for set 0; [3] for set 1 */
- n = SFB_BINMASK(t->pftag_qpriv8[(s << 1) + 1]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
bin = SFB_BINST(sp, 1, n, s);
if (*pmin > (u_int16_t)bin->pmark)
*pmin = (u_int16_t)bin->pmark;
- if (bin->pkts >= sp->sfb_allocation) {
- if (bin->pkts >= sp->sfb_drop_thresh)
- ret = 1; /* drop or mark */
+ if (bin->pkts >= sp->sfb_allocation)
sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
- }
#else /* SFB_LEVELS != 2 */
for (i = 0; i < SFB_LEVELS; i++) {
if (s == 0) /* set 0, bin index [0,1] */
- n = SFB_BINMASK(t->pftag_qpriv8[i]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
else /* set 1, bin index [2,3] */
- n = SFB_BINMASK(t->pftag_qpriv8[i + 2]);
+ n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);
bin = SFB_BINST(sp, i, n, s);
if (*pmin > (u_int16_t)bin->pmark)
*pmin = (u_int16_t)bin->pmark;
- if (bin->pkts >= sp->sfb_allocation) {
- if (bin->pkts >= sp->sfb_drop_thresh)
- ret = 1; /* drop or mark */
+ if (bin->pkts >= sp->sfb_allocation)
sfb_increment_bin(sp, bin,
SFB_BINFT(sp, i, n, s), now);
- }
+ if (i == SFB_FC_LEVEL)
+ ret = sfb_bin_mark_or_drop(sp, bin);
}
#endif /* SFB_LEVELS != 2 */
return (ret);
}
+static void
+sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
+ struct timespec *now)
+{
+ struct timespec max_getqtime;
+
+ if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
+ qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
+ !net_timerisset(&sp->sfb_getqtime))
+ return;
+
+ net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
+ &max_getqtime);
+ if (net_timercmp(now, &max_getqtime, >)) {
+ /*
+ * No packets have been dequeued in an update interval
+ * worth of time, which means the queue is stalled.
+ */
+ SFB_SET_DELAY_HIGH(sp, q);
+ sp->sfb_stats.dequeue_stall++;
+ }
+}
+
#define DTYPE_NODROP 0 /* no drop */
#define DTYPE_FORCED 1 /* a "forced" drop */
#define DTYPE_EARLY 2 /* an "unforced" (early) drop */
int
sfb_addq(struct sfb *sp, class_queue_t *q, struct mbuf *m, struct pf_mtag *t)
{
+#if !PF_ECN
+#pragma unused(t)
+#endif /* !PF_ECN */
+ struct pkthdr *pkt = &m->m_pkthdr;
struct timespec now;
int droptype, s;
u_int16_t pmin;
s = sp->sfb_current;
VERIFY((s + (s ^ 1)) == 1);
+ /* See comments in <rdar://problem/14040693> */
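+ /*
+ * While a packet sits in this queue, its module-private scratch
+ * space (the pkt_sfb_* fields) belongs to SFB; the guard bit
+ * asserts that no other layer uses it concurrently.
+ */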
+ VERIFY(!(pkt->pkt_flags & PKTF_PRIV_GUARDED));
+ pkt->pkt_flags |= PKTF_PRIV_GUARDED;
+
/* time to swap the bins? */
if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
VERIFY((s + (s ^ 1)) == 1);
}
- t->pftag_flags &= ~SFB_PKT_PBOX;
- t->pftag_qpriv16[s] =
- (SFB_HASH(&t->pftag_flowhash, sizeof (t->pftag_flowhash),
+ if (!net_timerisset(&sp->sfb_update_time)) {
+ net_timeradd(&now, &sp->sfb_update_interval,
+ &sp->sfb_update_time);
+ }
+
+ pkt->pkt_sfb_flags = 0;
+ pkt->pkt_sfb_hash16[s] =
+ (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
(*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
- t->pftag_qpriv16[s ^ 1] =
- (SFB_HASH(&t->pftag_flowhash, sizeof (t->pftag_flowhash),
+ pkt->pkt_sfb_hash16[s ^ 1] =
+ (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
(*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);
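+
+ /*
+ * Each bin set hashes the flow ID with its own fudge value, so a
+ * bin swap (rehash) re-spreads flows across the bins of the new
+ * current set.
+ */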
+ /* check if the queue has been stalled */
+ sfb_detect_dequeue_stall(sp, q, &now);
+
/* see if we drop early */
droptype = DTYPE_NODROP;
- if (sfb_drop_early(sp, t, &pmin, &now)) {
+ if (sfb_drop_early(sp, pkt, &pmin, &now)) {
/* flow control, mark or drop by sfb */
if ((sp->sfb_flags & SFBF_FLOWCTL) &&
- (t->pftag_flags & PF_TAG_FLOWADV)) {
+ (pkt->pkt_flags & PKTF_FLOW_ADV)) {
fc_adv = 1;
/* drop all during suspension or for non-TCP */
if ((sp->sfb_flags & SFBF_SUSPENDED) ||
- !(t->pftag_flags & PF_TAG_TCP)) {
+ pkt->pkt_proto != IPPROTO_TCP) {
droptype = DTYPE_EARLY;
sp->sfb_stats.drop_early++;
}
- } else if ((sp->sfb_flags & SFBF_ECN) &&
- (t->pftag_flags & PF_TAG_TCP) && /* only for TCP */
+ }
+#if PF_ECN
+ else if ((sp->sfb_flags & SFBF_ECN) &&
+ (pkt->pkt_proto == IPPROTO_TCP) && /* only for TCP */
((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
mark_ecn(m, t, sp->sfb_flags) &&
!(sp->sfb_flags & SFBF_SUSPENDED)) {
/* successfully marked; do not drop. */
sp->sfb_stats.marked_packets++;
- } else {
+ }
+#endif /* PF_ECN */
+ else {
/* unforced drop by sfb */
droptype = DTYPE_EARLY;
sp->sfb_stats.drop_early++;
}
/* non-responsive flow penalty? */
- if (droptype == DTYPE_NODROP && sfb_penalize(sp, t, &now)) {
+ if (droptype == DTYPE_NODROP && sfb_penalize(sp, pkt, &now)) {
droptype = DTYPE_FORCED;
sp->sfb_stats.drop_pbox++;
}
- /* if the queue length hits the hard limit, it's a forced drop */
- if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q)) {
+ /*
+ * if max queue size is static, make it a forced drop
+ * when the queue length hits the queue limit
+ */
+ if (!(SFB_QUEUE_DELAYBASED(sp)) &&
+ droptype == DTYPE_NODROP && qlen(q) >= qlimit(q)) {
+ droptype = DTYPE_FORCED;
+ sp->sfb_stats.drop_queue++;
+ }
+
+ /*
+ * delay based queues have a larger maximum size to
+ * allow for bursts
+ */
+ if (SFB_QUEUE_DELAYBASED(sp) &&
+ droptype == DTYPE_NODROP &&
+ qlen(q) >= SFB_QUEUE_DELAYBASED_MAXSIZE) {
droptype = DTYPE_FORCED;
sp->sfb_stats.drop_queue++;
}
if (fc_adv == 1 && droptype != DTYPE_FORCED &&
- sfb_bin_addfcentry(sp, t)) {
+ sfb_bin_addfcentry(sp, pkt)) {
/* deliver flow control advisory error */
if (droptype == DTYPE_NODROP) {
ret = CLASSQEQ_SUCCESS_FC;
ret = CLASSQEQ_DROPPED_FC;
}
}
-
/* if successful enqueue this packet, else drop it */
if (droptype == DTYPE_NODROP) {
+ net_timernsec(&now, &pkt->pkt_enqueue_ts);
_addq(q, m);
} else {
IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROPPED);
}
- if (!(t->pftag_flags & SFB_PKT_PBOX))
- sfb_eq_update_bins(sp, t);
+ if (!(pkt->pkt_sfb_flags & SFB_PKT_PBOX))
+ sfb_eq_update_bins(sp, pkt);
else
sp->sfb_stats.pbox_packets++;
{
struct timespec now;
struct mbuf *m;
- struct pf_mtag *t;
+ struct pkthdr *pkt;
if (!purge && (sp->sfb_flags & SFBF_SUSPENDED))
return (NULL);
VERIFY(m->m_flags & M_PKTHDR);
- t = m_pftag(m);
+ pkt = &m->m_pkthdr;
+ VERIFY(pkt->pkt_flags & PKTF_PRIV_GUARDED);
if (!purge) {
/* calculate EWMA of dequeues */
if (net_timerisset(&sp->sfb_getqtime)) {
struct timespec delta;
u_int64_t avg, new;
-
net_timersub(&now, &sp->sfb_getqtime, &delta);
net_timernsec(&delta, &new);
avg = sp->sfb_stats.dequeue_avg;
/*
* If the time since last dequeue is
* significantly greater than the current
- * average, weight the average more against
+ * average, weigh the average more against
* the old value.
*/
if (DEQUEUE_SPIKE(new, avg))
*(&sp->sfb_getqtime) = *(&now);
}
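+ /*
+ * Track the minimum queueing delay seen during each update
+ * interval: the minimum reflects the standing queue, since a
+ * burst that drains within the interval still leaves at least
+ * one packet with a small queueing delay.
+ */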
+ if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
+ u_int64_t dequeue_ns, queue_delay = 0;
+ net_timernsec(&now, &dequeue_ns);
+ if (dequeue_ns > pkt->pkt_enqueue_ts)
+ queue_delay = dequeue_ns - pkt->pkt_enqueue_ts;
+
+ if (sp->sfb_min_qdelay == 0 ||
+ (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay))
+ sp->sfb_min_qdelay = queue_delay;
+ if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
+ if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
+ if (!SFB_IS_DELAYHIGH(sp))
+ SFB_SET_DELAY_HIGH(sp, q);
+ } else {
+ sp->sfb_flags &= ~(SFBF_DELAYHIGH);
+ sp->sfb_fc_threshold = 0;
+ }
+ net_timeradd(&now, &sp->sfb_update_interval,
+ &sp->sfb_update_time);
+ sp->sfb_min_qdelay = 0;
+ }
+ }
+
/*
* Clearpkts are the ones which were in the queue when the hash
* function was perturbed. Since the perturbation value (fudge),
* this reason. A rule of thumb is to set it to K*D, where D is
* the time taken to drain queue.
*/
- if (t->pftag_flags & SFB_PKT_PBOX) {
- t->pftag_flags &= ~SFB_PKT_PBOX;
+ if (pkt->pkt_sfb_flags & SFB_PKT_PBOX) {
+ pkt->pkt_sfb_flags &= ~SFB_PKT_PBOX;
if (sp->sfb_clearpkts > 0)
sp->sfb_clearpkts--;
} else if (sp->sfb_clearpkts > 0) {
sp->sfb_clearpkts--;
} else {
- sfb_dq_update_bins(sp, t, &now);
+ sfb_dq_update_bins(sp, pkt, &now, qsize(q));
+ }
+
+ /* See comments in <rdar://problem/14040693> */
+ pkt->pkt_flags &= ~PKTF_PRIV_GUARDED;
+
+ /*
+ * If the queue becomes empty before the update interval, reset
+ * the flow control threshold
+ */
+ if (qsize(q) == 0) {
+ sp->sfb_flags &= ~SFBF_DELAYHIGH;
+ sp->sfb_min_qdelay = 0;
+ sp->sfb_fc_threshold = 0;
+ net_timerclear(&sp->sfb_update_time);
}
return (m);
VERIFY(ifp != NULL);
switch (ev) {
- case CLASSQ_EV_LINK_SPEED: {
+ case CLASSQ_EV_LINK_BANDWIDTH: {
u_int64_t eff_rate = ifnet_output_linkrate(ifp);
/* update parameters only if rate has changed */
}
sfb_calc_holdtime(sp, eff_rate);
sfb_calc_pboxtime(sp, eff_rate);
+ sfb_calc_target_qdelay(sp, eff_rate);
+ sfb_calc_update_interval(sp, eff_rate);
break;
}
sfb_resetq(sp, ev);
break;
+ case CLASSQ_EV_LINK_LATENCY:
case CLASSQ_EV_LINK_MTU:
default:
break;