/*
- * Copyright (c) 2016-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <net/classq/classq_fq_codel.h>
#include <net/pktsched/pktsched_fq_codel.h>
-static size_t fq_if_size;
-static struct zone *fq_if_zone;
+static ZONE_DECLARE(fq_if_zone, "pktsched_fq_if", sizeof(fq_if_t), ZC_ZFREE_CLEARMEM);
static fq_if_t *fq_if_alloc(struct ifnet *, classq_pkt_type_t);
static void fq_if_destroy(fq_if_t *fqs);
-static void fq_if_classq_init(fq_if_t *fqs, u_int32_t priority,
- u_int32_t quantum, u_int32_t drr_max, u_int32_t svc_class);
-static int fq_if_enqueue_classq(struct ifclassq *ifq, void *p,
- classq_pkt_type_t ptype, boolean_t *pdrop);
-static void *fq_if_dequeue_classq(struct ifclassq *, classq_pkt_type_t *);
-static int fq_if_dequeue_classq_multi(struct ifclassq *, u_int32_t,
- u_int32_t, void **, void **, u_int32_t *, u_int32_t *, classq_pkt_type_t *);
-static void *fq_if_dequeue_sc_classq(struct ifclassq *, mbuf_svc_class_t,
- classq_pkt_type_t *);
-static int fq_if_dequeue_sc_classq_multi(struct ifclassq *,
- mbuf_svc_class_t, u_int32_t, u_int32_t, void **,
- void **, u_int32_t *, u_int32_t *, classq_pkt_type_t *);
-static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, u_int32_t,
- u_int32_t, void **, void **, u_int32_t *, u_int32_t *,
- boolean_t drvmgmt, classq_pkt_type_t *);
-static int fq_if_request_classq(struct ifclassq *ifq, cqrq_t op, void *arg);
+static void fq_if_classq_init(fq_if_t *fqs, uint32_t priority,
+ uint16_t quantum, uint32_t drr_max, uint32_t svc_class);
+static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, uint32_t,
+ int64_t, classq_pkt_t *, classq_pkt_t *, uint32_t *,
+ uint32_t *, boolean_t drvmgmt);
void fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat);
static void fq_if_purge(fq_if_t *);
static void fq_if_purge_classq(fq_if_t *, fq_if_classq_t *);
static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl,
fq_t *fq, bool remove_hash);
-#define FQ_IF_ZONE_MAX 32 /* Maximum elements in zone */
-#define FQ_IF_ZONE_NAME "pktsched_fq_if" /* zone for fq_if class */
-
-#define FQ_IF_FLOW_HASH_ID(_flowid_) \
+#define FQ_IF_FLOW_HASH_ID(_flowid_) \
(((_flowid_) >> FQ_IF_HASH_TAG_SHIFT) & FQ_IF_HASH_TAG_MASK)
-#define FQ_IF_CLASSQ_IDLE(_fcl_) \
+#define FQ_IF_CLASSQ_IDLE(_fcl_) \
(STAILQ_EMPTY(&(_fcl_)->fcl_new_flows) && \
STAILQ_EMPTY(&(_fcl_)->fcl_old_flows))
-typedef void (* fq_if_append_pkt_t)(void *, void *);
+typedef void (* fq_if_append_pkt_t)(classq_pkt_t *, classq_pkt_t *);
typedef boolean_t (* fq_getq_flow_t)(fq_if_t *, fq_if_classq_t *, fq_t *,
- u_int32_t, u_int32_t, void **, void **, u_int32_t *, u_int32_t *,
- boolean_t *, u_int32_t);
+ int64_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
+ u_int32_t *, boolean_t *, u_int32_t);
static void
-fq_if_append_mbuf(void *pkt, void *next_pkt)
+fq_if_append_mbuf(classq_pkt_t *pkt, classq_pkt_t *next_pkt)
{
- ((mbuf_t)pkt)->m_nextpkt = (mbuf_t)next_pkt;
+ pkt->cp_mbuf->m_nextpkt = next_pkt->cp_mbuf;
}
static boolean_t
fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
- u_int32_t byte_limit, u_int32_t pkt_limit, void **top, void **last,
- u_int32_t *byte_cnt, u_int32_t *pkt_cnt, boolean_t *qempty,
- u_int32_t pflags)
+ int64_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *top,
+ classq_pkt_t *last, u_int32_t *byte_cnt, u_int32_t *pkt_cnt,
+ boolean_t *qempty, u_int32_t pflags)
{
- struct mbuf *m;
u_int32_t plen;
pktsched_pkt_t pkt;
boolean_t limit_reached = FALSE;
while (fq->fq_deficit > 0 && limit_reached == FALSE &&
!MBUFQ_EMPTY(&fq->fq_mbufq)) {
-
_PKTSCHED_PKT_INIT(&pkt);
- m = fq_getq_flow(fqs, fq, &pkt);
+ fq_getq_flow(fqs, fq, &pkt);
ASSERT(pkt.pktsched_ptype == QP_MBUF);
plen = pktsched_get_pkt_len(&pkt);
fq->fq_deficit -= plen;
- m->m_pkthdr.pkt_flags |= pflags;
+ pkt.pktsched_pkt_mbuf->m_pkthdr.pkt_flags |= pflags;
- if (*top == NULL) {
- *top = m;
+ if (top->cp_mbuf == NULL) {
+ *top = pkt.pktsched_pkt;
} else {
- ASSERT(*last != NULL);
- ASSERT((*(struct mbuf **)last)->m_nextpkt == NULL);
- (*(struct mbuf **)last)->m_nextpkt = m;
+ ASSERT(last->cp_mbuf != NULL);
+ ASSERT(last->cp_mbuf->m_nextpkt == NULL);
+ last->cp_mbuf->m_nextpkt = pkt.pktsched_pkt_mbuf;
}
- *last = m;
- (*(mbuf_t *)last)->m_nextpkt = NULL;
+ *last = pkt.pktsched_pkt;
+ last->cp_mbuf->m_nextpkt = NULL;
fq_cl->fcl_stat.fcl_dequeue++;
fq_cl->fcl_stat.fcl_dequeue_bytes += plen;
*pkt_cnt += 1;
*byte_cnt += plen;
- ifclassq_set_packet_metadata(ifq, ifp, m, QP_MBUF);
+ ifclassq_set_packet_metadata(ifq, ifp, &pkt.pktsched_pkt);
/* Check if the limit is reached */
- if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit)
+ if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) {
limit_reached = TRUE;
+ }
}
*qempty = MBUFQ_EMPTY(&fq->fq_mbufq);
- return (limit_reached);
-}
-
-void
-fq_codel_scheduler_init(void)
-{
- /* Initialize the zone for flow queue structures */
- fq_codel_init();
-
- fq_if_size = sizeof (fq_if_t);
- fq_if_zone = zinit(fq_if_size, (FQ_IF_ZONE_MAX * fq_if_size), 0,
- FQ_IF_ZONE_NAME);
- if (fq_if_zone == NULL) {
- panic("%s: failed allocating from %s", __func__,
- (FQ_IF_ZONE_NAME));
- }
- zone_change(fq_if_zone, Z_EXPAND, TRUE);
- zone_change(fq_if_zone, Z_CALLERACCT, TRUE);
-
+ return limit_reached;
}
fq_if_t *
fq_if_alloc(struct ifnet *ifp, classq_pkt_type_t ptype)
{
fq_if_t *fqs;
- fqs = zalloc(fq_if_zone);
- if (fqs == NULL)
- return (NULL);
- bzero(fqs, fq_if_size);
+ fqs = zalloc_flags(fq_if_zone, Z_WAITOK | Z_ZERO);
fqs->fqs_ifq = &ifp->if_snd;
fqs->fqs_ptype = ptype;
/* Configure packet drop limit across all queues */
fqs->fqs_pkt_droplimit = IFCQ_PKT_DROP_LIMIT(&ifp->if_snd);
STAILQ_INIT(&fqs->fqs_fclist);
- return (fqs);
+ return fqs;
}
void
zfree(fq_if_zone, fqs);
}
-static inline u_int32_t
+static inline uint8_t
fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc)
{
- u_int32_t pri;
+ uint8_t pri;
if (fqs->fqs_flags & FQS_DRIVER_MANAGED) {
switch (svc) {
pri = FQ_IF_BE_INDEX; /* Use best effort by default */
break;
}
- return (pri);
+ return pri;
}
/* scheduler is not managed by the driver */
pri = FQ_IF_BE_INDEX; /* Use best effort by default */
break;
}
- return (pri);
+ return pri;
}
-void
-fq_if_classq_init(fq_if_t *fqs, u_int32_t pri, u_int32_t quantum,
- u_int32_t drr_max, u_int32_t svc_class)
+static void
+fq_if_classq_init(fq_if_t *fqs, uint32_t pri, uint16_t quantum,
+ uint32_t drr_max, uint32_t svc_class)
{
fq_if_classq_t *fq_cl;
-
+ VERIFY(pri < FQ_IF_MAX_CLASSES);
fq_cl = &fqs->fqs_classq[pri];
- VERIFY(pri >= 0 && pri < FQ_IF_MAX_CLASSES &&
- fq_cl->fcl_quantum == 0);
+ VERIFY(fq_cl->fcl_quantum == 0);
fq_cl->fcl_quantum = quantum;
fq_cl->fcl_pri = pri;
fq_cl->fcl_drr_max = drr_max;
}
int
-fq_if_enqueue_classq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype,
- boolean_t *pdrop)
+fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *head,
+ classq_pkt_t *tail, uint32_t cnt, uint32_t bytes, boolean_t *pdrop)
{
- u_int32_t pri;
+ uint8_t pri;
fq_if_t *fqs;
fq_if_classq_t *fq_cl;
- int ret, len;
+ int ret;
mbuf_svc_class_t svc;
pktsched_pkt_t pkt;
- IFCQ_LOCK_ASSERT_HELD(ifq);
- if ((ptype == QP_MBUF) && !(((mbuf_t)p)->m_flags & M_PKTHDR)) {
- IFCQ_CONVERT_LOCK(ifq);
- m_freem((mbuf_t)p);
- *pdrop = TRUE;
- return (ENOBUFS);
- }
- pktsched_pkt_encap(&pkt, ptype, p);
+ pktsched_pkt_encap_chain(&pkt, head, tail, cnt, bytes);
fqs = (fq_if_t *)ifq->ifcq_disc;
svc = pktsched_get_pkt_svc(&pkt);
pri = fq_if_service_to_priority(fqs, svc);
- VERIFY(pri >= 0 && pri < FQ_IF_MAX_CLASSES);
+ VERIFY(pri < FQ_IF_MAX_CLASSES);
fq_cl = &fqs->fqs_classq[pri];
- if (svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1) {
+ if (__improbable(svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1)) {
/* BK_SYS is currently throttled */
- fq_cl->fcl_stat.fcl_throttle_drops++;
- IFCQ_CONVERT_LOCK(ifq);
+ atomic_add_32(&fq_cl->fcl_stat.fcl_throttle_drops, 1);
pktsched_free_pkt(&pkt);
*pdrop = TRUE;
- return (EQSUSPENDED);
+ ret = EQSUSPENDED;
+ goto done;
}
- len = pktsched_get_pkt_len(&pkt);
+ IFCQ_LOCK_SPIN(ifq);
ret = fq_addq(fqs, &pkt, fq_cl);
if (!(fqs->fqs_flags & FQS_DRIVER_MANAGED) &&
!FQ_IF_CLASSQ_IDLE(fq_cl)) {
}
}
- if (ret != 0) {
+ if (__improbable(ret != 0)) {
if (ret == CLASSQEQ_SUCCESS_FC) {
/* packet enqueued, return advisory feedback */
ret = EQFULL;
*pdrop = FALSE;
+ } else if (ret == CLASSQEQ_COMPRESSED) {
+ ret = 0;
+ *pdrop = FALSE;
} else {
+ IFCQ_UNLOCK(ifq);
*pdrop = TRUE;
- VERIFY(ret == CLASSQEQ_DROP ||
- ret == CLASSQEQ_DROP_FC ||
- ret == CLASSQEQ_DROP_SP);
pktsched_free_pkt(&pkt);
switch (ret) {
case CLASSQEQ_DROP:
- return (ENOBUFS);
+ ret = ENOBUFS;
+ goto done;
case CLASSQEQ_DROP_FC:
- return (EQFULL);
+ ret = EQFULL;
+ goto done;
case CLASSQEQ_DROP_SP:
- return (EQSUSPENDED);
+ ret = EQSUSPENDED;
+ goto done;
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ __builtin_unreachable();
}
+ /* NOTREACHED */
+ __builtin_unreachable();
}
} else {
*pdrop = FALSE;
}
- IFCQ_INC_LEN(ifq);
- IFCQ_INC_BYTES(ifq, len);
- return (ret);
+ IFCQ_ADD_LEN(ifq, cnt);
+ IFCQ_INC_BYTES(ifq, bytes);
+ IFCQ_UNLOCK(ifq);
+done:
+ return ret;
}
-static void *
-fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_type_t *ptype)
+void
+fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_t *pkt)
{
- void *top;
-
(void) fq_if_dequeue_classq_multi(ifq, 1,
- CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, &top, NULL, NULL, NULL, ptype);
- return (top);
+ CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, pkt, NULL, NULL, NULL);
}
-static void *
+void
fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc,
- classq_pkt_type_t *ptype)
+ classq_pkt_t *pkt)
{
- void *top;
fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
+ uint32_t total_pktcnt = 0, total_bytecnt = 0;
fq_if_classq_t *fq_cl;
- u_int32_t pri;
+ uint8_t pri;
pri = fq_if_service_to_priority(fqs, svc);
fq_cl = &fqs->fqs_classq[pri];
fq_if_dequeue(fqs, fq_cl, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT,
- &top, NULL, NULL, NULL, TRUE, ptype);
- return (top);
+ pkt, NULL, &total_pktcnt, &total_bytecnt, TRUE);
+
+ IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
}
int
fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt,
- u_int32_t maxbytecnt, void **first_packet,
- void **last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt,
- classq_pkt_type_t *ptype)
+ u_int32_t maxbytecnt, classq_pkt_t *first_packet,
+ classq_pkt_t *last_packet, u_int32_t *retpktcnt,
+ u_int32_t *retbytecnt)
{
- void *top = NULL, *tail = NULL, *first, *last;
- u_int32_t pktcnt = 0, bytecnt = 0, total_pktcnt, total_bytecnt;
- fq_if_t *fqs;
+ u_int32_t pktcnt = 0, bytecnt = 0, total_pktcnt = 0, total_bytecnt = 0;
+	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
+ classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
+ classq_pkt_t tmp = CLASSQ_PKT_INITIALIZER(tmp);
+ fq_if_append_pkt_t append_pkt;
fq_if_classq_t *fq_cl;
+ fq_if_t *fqs;
int pri;
- fq_if_append_pkt_t append_pkt;
IFCQ_LOCK_ASSERT_HELD(ifq);
default:
VERIFY(0);
/* NOTREACHED */
+ __builtin_unreachable();
}
- first = last = NULL;
- total_pktcnt = total_bytecnt = 0;
- *ptype = fqs->fqs_ptype;
-
for (;;) {
- classq_pkt_type_t tmp_ptype;
+ classq_pkt_t top = CLASSQ_PKT_INITIALIZER(top);
+ classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);
+
if (fqs->fqs_bitmaps[FQ_IF_ER] == 0 &&
fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
fqs->fqs_bitmaps[FQ_IF_EB] = fqs->fqs_bitmaps[FQ_IF_IB];
fqs->fqs_bitmaps[FQ_IF_IB] = 0;
- if (fqs->fqs_bitmaps[FQ_IF_EB] == 0)
+ if (fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
break;
+ }
}
pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_ER]);
if (pri == 0) {
fq_cl->fcl_budget += (min(fq_cl->fcl_drr_max,
fq_cl->fcl_stat.fcl_flows_cnt) *
fq_cl->fcl_quantum);
- if (fq_cl->fcl_budget <= 0)
+ if (fq_cl->fcl_budget <= 0) {
goto state_change;
+ }
}
fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
(maxbytecnt - total_bytecnt), &top, &tail, &pktcnt,
- &bytecnt, FALSE, &tmp_ptype);
- if (top != NULL) {
- ASSERT(tmp_ptype == *ptype);
+ &bytecnt, FALSE);
+ if (top.cp_mbuf != NULL) {
ASSERT(pktcnt > 0 && bytecnt > 0);
- if (first == NULL) {
+ if (first.cp_mbuf == NULL) {
first = top;
- last = tail;
total_pktcnt = pktcnt;
total_bytecnt = bytecnt;
} else {
- append_pkt(last, top);
- last = tail;
+ ASSERT(last.cp_mbuf != NULL);
+ append_pkt(&last, &top);
total_pktcnt += pktcnt;
total_bytecnt += bytecnt;
}
- append_pkt(last, NULL);
+ last = tail;
+ append_pkt(&last, &tmp);
fq_cl->fcl_budget -= bytecnt;
pktcnt = 0;
bytecnt = 0;
pktsched_bit_clr(pri, &fqs->fqs_bitmaps[FQ_IF_ER]);
VERIFY(((fqs->fqs_bitmaps[FQ_IF_ER] |
fqs->fqs_bitmaps[FQ_IF_EB] |
- fqs->fqs_bitmaps[FQ_IF_IB])&(1 << pri)) == 0);
+ fqs->fqs_bitmaps[FQ_IF_IB]) & (1 << pri)) == 0);
fq_cl->fcl_budget = 0;
}
- if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt)
+ if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt) {
break;
+ }
}
- if (first != NULL) {
- if (first_packet != NULL)
- *first_packet = first;
- if (last_packet != NULL)
- *last_packet = last;
- if (retpktcnt != NULL)
- *retpktcnt = total_pktcnt;
- if (retbytecnt != NULL)
- *retbytecnt = total_bytecnt;
- IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
- } else {
- if (first_packet != NULL)
- *first_packet = NULL;
- if (last_packet != NULL)
- *last_packet = NULL;
- if (retpktcnt != NULL)
- *retpktcnt = 0;
- if (retbytecnt != NULL)
- *retbytecnt = 0;
- }
- return (0);
+
+ if (__probable(first_packet != NULL)) {
+ *first_packet = first;
+ }
+ if (last_packet != NULL) {
+ *last_packet = last;
+ }
+ if (retpktcnt != NULL) {
+ *retpktcnt = total_pktcnt;
+ }
+ if (retbytecnt != NULL) {
+ *retbytecnt = total_bytecnt;
+ }
+
+ IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
+ return 0;
}
int
fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc,
- u_int32_t maxpktcnt, u_int32_t maxbytecnt, void **first_packet,
- void **last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt,
- classq_pkt_type_t *ptype)
+ u_int32_t maxpktcnt, u_int32_t maxbytecnt, classq_pkt_t *first_packet,
+ classq_pkt_t *last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt)
{
-#pragma unused(maxpktcnt, maxbytecnt, first_packet, last_packet, retpktcnt, retbytecnt)
fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
- u_int32_t pri;
+ uint8_t pri;
u_int32_t total_pktcnt = 0, total_bytecnt = 0;
fq_if_classq_t *fq_cl;
- void *first = NULL, *last = NULL;
+	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
+ classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
fq_if_append_pkt_t append_pkt;
switch (fqs->fqs_ptype) {
default:
VERIFY(0);
/* NOTREACHED */
+ __builtin_unreachable();
}
pri = fq_if_service_to_priority(fqs, svc);
fq_cl = &fqs->fqs_classq[pri];
-
/*
* Now we have the queue for a particular service class. We need
* to dequeue as many packets as needed, first from the new flows
*/
while (total_pktcnt < maxpktcnt && total_bytecnt < maxbytecnt &&
fq_cl->fcl_stat.fcl_pkt_cnt > 0) {
- void *top, *tail;
+ classq_pkt_t top = CLASSQ_PKT_INITIALIZER(top);
+ classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);
u_int32_t pktcnt = 0, bytecnt = 0;
+
fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
(maxbytecnt - total_bytecnt), &top, &tail, &pktcnt,
- &bytecnt, TRUE, ptype);
- if (first == NULL) {
- first = top;
- total_pktcnt = pktcnt;
- total_bytecnt = bytecnt;
- } else {
- append_pkt(last, top);
- total_pktcnt += pktcnt;
- total_bytecnt += bytecnt;
+ &bytecnt, TRUE);
+ if (top.cp_mbuf != NULL) {
+ if (first.cp_mbuf == NULL) {
+ first = top;
+ total_pktcnt = pktcnt;
+ total_bytecnt = bytecnt;
+ } else {
+ ASSERT(last.cp_mbuf != NULL);
+ append_pkt(&last, &top);
+ total_pktcnt += pktcnt;
+ total_bytecnt += bytecnt;
+ }
+ last = tail;
}
- last = tail;
- }
- if (first != NULL) {
- if (first_packet != NULL)
- *first_packet = first;
- if (last_packet != NULL)
- *last_packet = last;
- if (retpktcnt != NULL)
- *retpktcnt = total_pktcnt;
- if (retbytecnt != NULL)
- *retbytecnt = total_bytecnt;
- } else {
- if (first_packet != NULL)
- *first_packet = NULL;
- if (last_packet != NULL)
- *last_packet = NULL;
- if (retpktcnt != NULL)
- *retpktcnt = 0;
- if (retbytecnt != NULL)
- *retbytecnt = 0;
- }
- return (0);
+ }
+
+ if (__probable(first_packet != NULL)) {
+ *first_packet = first;
+ }
+ if (last_packet != NULL) {
+ *last_packet = last;
+ }
+ if (retpktcnt != NULL) {
+ *retpktcnt = total_pktcnt;
+ }
+ if (retbytecnt != NULL) {
+ *retbytecnt = total_bytecnt;
+ }
+
+ IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
+
+ return 0;
}
static void
fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
pkts = bytes = 0;
_PKTSCHED_PKT_INIT(&pkt);
- while (fq_getq_flow(fqs, fq, &pkt) != NULL) {
+ for (;;) {
+ fq_getq_flow(fqs, fq, &pkt);
+ if (pkt.pktsched_pkt_mbuf == NULL) {
+ VERIFY(pkt.pktsched_ptype == QP_INVALID);
+ break;
+ }
pkts++;
bytes += pktsched_get_pkt_len(&pkt);
pktsched_free_pkt(&pkt);
&fqs->fqs_bitmaps[i]);
}
}
- if (pktsp != NULL)
+ if (pktsp != NULL) {
*pktsp = pkts;
- if (bytesp != NULL)
+ }
+ if (bytesp != NULL) {
*bytesp = bytes;
+ }
}
static void
VERIFY(SLIST_EMPTY(&fqs->fqs_flows[i]));
}
- bzero(&fqs->fqs_bitmaps, sizeof (fqs->fqs_bitmaps));
+ bzero(&fqs->fqs_bitmaps, sizeof(fqs->fqs_bitmaps));
IFCQ_LEN(fqs->fqs_ifq) = 0;
IFCQ_BYTES(fqs->fqs_ifq) = 0;
/* packet type is needed only if we want to create a flow queue */
fq = fq_if_hash_pkt(fqs, req->flow, req->sc, 0, FALSE, QP_INVALID);
- if (fq != NULL)
+ if (fq != NULL) {
fq_if_purge_flow(fqs, fq, &req->packets, &req->bytes);
+ }
}
static void
fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr)
{
struct ifclassq *ifq = fqs->fqs_ifq;
- int index;
+ uint8_t index;
#if !MACH_ASSERT
#pragma unused(ifq)
#endif
if (!tr->set) {
tr->level = fqs->fqs_throttle;
- return (0);
+ return 0;
}
- if (tr->level == fqs->fqs_throttle)
- return (EALREADY);
+ if (tr->level == fqs->fqs_throttle) {
+ return EALREADY;
+ }
/* Throttling is allowed on BK_SYS class only */
index = fq_if_service_to_priority(fqs, MBUF_SC_BK_SYS);
default:
break;
}
- return (0);
+ return 0;
}
void
fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat)
{
- u_int32_t pri;
+ uint8_t pri;
fq_if_classq_t *fq_cl;
- if (stat == NULL)
+ if (stat == NULL) {
return;
+ }
pri = fq_if_service_to_priority(fqs, stat->sc);
fq_cl = &fqs->fqs_classq[pri];
- stat->packets = fq_cl->fcl_stat.fcl_pkt_cnt;
- stat->bytes = fq_cl->fcl_stat.fcl_byte_cnt;
+ stat->packets = (uint32_t)fq_cl->fcl_stat.fcl_pkt_cnt;
+ stat->bytes = (uint32_t)fq_cl->fcl_stat.fcl_byte_cnt;
}
int
fq_if_stat_sc(fqs, (cqrq_stat_sc_t *)arg);
break;
}
- return (err);
+ return err;
}
int
VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
fqs = fq_if_alloc(ifp, ptype);
- if (fqs == NULL)
- return (ENOMEM);
+ if (fqs == NULL) {
+ return ENOMEM;
+ }
if (flags & PKTSCHEDF_QALG_DRIVER_MANAGED) {
fqs->fqs_flags |= FQS_DRIVER_MANAGED;
8, MBUF_SC_CTL);
}
- err = ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs,
- fq_if_enqueue_classq, fq_if_dequeue_classq,
- fq_if_dequeue_sc_classq, fq_if_dequeue_classq_multi,
- fq_if_dequeue_sc_classq_multi, fq_if_request_classq);
+ err = ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs);
if (err != 0) {
printf("%s: error from ifclassq_attach, "
"failed to attach fq_if: %d\n", __func__, err);
fq_if_destroy(fqs);
}
- return (err);
+ return err;
}
fq_t *
SLIST_FOREACH(fq, fq_list, fq_hashlink) {
if (fq->fq_flowhash == flowid &&
- fq->fq_sc_index == scidx)
+ fq->fq_sc_index == scidx) {
break;
+ }
}
if (fq == NULL && create == TRUE) {
ASSERT(ptype == QP_MBUF);
* If getq time is not set because this is the first packet or after
* idle time, set it now so that we can detect a stall.
*/
- if (fq != NULL && fq->fq_getqtime == 0)
+ if (fq != NULL && fq->fq_getqtime == 0) {
fq->fq_getqtime = now;
+ }
- return (fq);
+ return fq;
}
void
fq_cl->fcl_stat.fcl_flows_cnt--;
IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
fq_destroy(fq);
-
}
inline boolean_t
fq_if_at_drop_limit(fq_if_t *fqs)
{
- return (((IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ?
- TRUE : FALSE));
+ return (IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ?
+ TRUE : FALSE;
}
static void
fq_t *fq = fqs->fqs_large_flow;
fq_if_classq_t *fq_cl;
pktsched_pkt_t pkt;
- uint32_t *pkt_flags;
+ volatile uint32_t *pkt_flags;
uint64_t *pkt_timestamp;
- if (fq == NULL)
+ if (fq == NULL) {
return;
+ }
/* queue can not be empty on the largest flow */
VERIFY(!fq_empty(fq));
fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
_PKTSCHED_PKT_INIT(&pkt);
- (void)fq_getq_flow_internal(fqs, fq, &pkt);
+ fq_getq_flow_internal(fqs, fq, &pkt);
+ ASSERT(pkt.pktsched_ptype != QP_INVALID);
pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
NULL, NULL);
IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
*pkt_timestamp = 0;
- if (pkt.pktsched_ptype == QP_MBUF)
+ switch (pkt.pktsched_ptype) {
+ case QP_MBUF:
*pkt_flags &= ~PKTF_PRIV_GUARDED;
+ break;
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ __builtin_unreachable();
+ }
if (fq_empty(fq)) {
fqs->fqs_large_flow = NULL;
fq_t *prev_fq;
if (fqs->fqs_large_flow != NULL &&
- fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT)
+ fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
fqs->fqs_large_flow = NULL;
+ }
- if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT)
+ if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
return;
+ }
prev_fq = fqs->fqs_large_flow;
if (prev_fq == NULL) {
- if (!fq_empty(fq))
+ if (!fq_empty(fq)) {
fqs->fqs_large_flow = fq;
+ }
return;
} else if (fq->fq_bytes > prev_fq->fq_bytes) {
fqs->fqs_large_flow = fq;
if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
fce->fce_flowid == flowid) {
/* Already on flowcontrol list */
- return (TRUE);
+ return TRUE;
}
}
IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
STAILQ_INSERT_TAIL(&fqs->fqs_fclist, fce, fce_link);
fq_cl->fcl_stat.fcl_flow_control++;
}
- return ((fce != NULL) ? TRUE : FALSE);
+ return (fce != NULL) ? TRUE : FALSE;
}
void
IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) {
- if (fce->fce_flowid == fq->fq_flowhash)
+ if (fce->fce_flowid == fq->fq_flowhash) {
break;
+ }
}
if (fce != NULL) {
STAILQ_REMOVE(&fqs->fqs_fclist, fce, flowadv_fcentry,
}
void
-fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit,
- u_int32_t bytelimit, void **top, void **tail,
- u_int32_t *retpktcnt, u_int32_t *retbytecnt, boolean_t drvmgmt,
- classq_pkt_type_t *ptype)
+fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, uint32_t pktlimit,
+ int64_t bytelimit, classq_pkt_t *top, classq_pkt_t *tail,
+ uint32_t *retpktcnt, uint32_t *retbytecnt, boolean_t drvmgmt)
{
fq_t *fq = NULL, *tfq = NULL;
flowq_stailq_t temp_stailq;
u_int32_t pktcnt, bytecnt;
boolean_t qempty, limit_reached = FALSE;
- void *last = NULL;
+ classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
fq_getq_flow_t fq_getq_flow_fn;
switch (fqs->fqs_ptype) {
default:
VERIFY(0);
/* NOTREACHED */
+ __builtin_unreachable();
}
/*
* maximum byte limit should not be greater than the budget for
* this class
*/
- if ((int32_t)bytelimit > fq_cl->fcl_budget && !drvmgmt)
+ if (bytelimit > fq_cl->fcl_budget && !drvmgmt) {
bytelimit = fq_cl->fcl_budget;
+ }
VERIFY(pktlimit > 0 && bytelimit > 0 && top != NULL);
-
- *top = NULL;
- *ptype = fqs->fqs_ptype;
pktcnt = bytecnt = 0;
STAILQ_INIT(&temp_stailq);
STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) {
- ASSERT((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) ==
+ ASSERT((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
FQF_NEW_FLOW);
limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
pktlimit, top, &last, &bytecnt, &pktcnt, &qempty,
PKTF_NEW_FLOW);
- if (fq->fq_deficit <= 0 || qempty)
+ if (fq->fq_deficit <= 0 || qempty) {
fq_if_empty_new_flow(fq, fq_cl, true);
+ }
fq->fq_deficit += fq_cl->fcl_quantum;
- if (limit_reached)
+ if (limit_reached) {
goto done;
+ }
}
STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) {
- VERIFY((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) ==
+ VERIFY((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
FQF_OLD_FLOW);
limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
STAILQ_INSERT_TAIL(&temp_stailq, fq, fq_actlink);
fq->fq_deficit += fq_cl->fcl_quantum;
}
- if (limit_reached)
+ if (limit_reached) {
break;
+ }
}
done:
fq_cl->fcl_old_flows = temp_stailq;
}
- if (last != NULL) {
- VERIFY(*top != NULL);
- if (tail != NULL)
+ if (last.cp_mbuf != NULL) {
+ VERIFY(top->cp_mbuf != NULL);
+ if (tail != NULL) {
*tail = last;
- if (retpktcnt != NULL)
+ }
+ if (retpktcnt != NULL) {
*retpktcnt = pktcnt;
- if (retbytecnt != NULL)
+ }
+ if (retbytecnt != NULL) {
*retbytecnt = bytecnt;
+ }
}
}
-int
+void
fq_if_teardown_ifclassq(struct ifclassq *ifq)
{
fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
fq_if_destroy(fqs);
ifq->ifcq_disc = NULL;
- return (ifclassq_detach(ifq));
+ ifclassq_detach(ifq);
}
static void
fq_export_flowstats(fq_if_t *fqs, fq_t *fq,
struct fq_codel_flowstats *flowstat)
{
- bzero(flowstat, sizeof (*flowstat));
- flowstat->fqst_min_qdelay = fq->fq_min_qdelay;
+ bzero(flowstat, sizeof(*flowstat));
+ flowstat->fqst_min_qdelay = (uint32_t)fq->fq_min_qdelay;
flowstat->fqst_bytes = fq->fq_bytes;
flowstat->fqst_flowhash = fq->fq_flowhash;
- if (fq->fq_flags & FQF_NEW_FLOW)
+ if (fq->fq_flags & FQF_NEW_FLOW) {
flowstat->fqst_flags |= FQ_FLOWSTATS_NEW_FLOW;
- if (fq->fq_flags & FQF_OLD_FLOW)
+ }
+ if (fq->fq_flags & FQF_OLD_FLOW) {
flowstat->fqst_flags |= FQ_FLOWSTATS_OLD_FLOW;
- if (fq->fq_flags & FQF_DELAY_HIGH)
+ }
+ if (fq->fq_flags & FQF_DELAY_HIGH) {
flowstat->fqst_flags |= FQ_FLOWSTATS_DELAY_HIGH;
- if (fq->fq_flags & FQF_FLOWCTL_ON)
+ }
+ if (fq->fq_flags & FQF_FLOWCTL_ON) {
flowstat->fqst_flags |= FQ_FLOWSTATS_FLOWCTL_ON;
- if (fqs->fqs_large_flow == fq)
+ }
+ if (fqs->fqs_large_flow == fq) {
flowstat->fqst_flags |= FQ_FLOWSTATS_LARGE_FLOW;
+ }
}
int
fq_t *fq = NULL;
u_int32_t i, flowstat_cnt;
- if (qid >= FQ_IF_MAX_CLASSES)
- return (EINVAL);
+ if (qid >= FQ_IF_MAX_CLASSES) {
+ return EINVAL;
+ }
fqs = (fq_if_t *)ifq->ifcq_disc;
fcls = &ifqs->ifqs_fq_codel_stats;
fcls->fcls_throttle_off = fq_cl->fcl_stat.fcl_throttle_off;
fcls->fcls_throttle_drops = fq_cl->fcl_stat.fcl_throttle_drops;
fcls->fcls_dup_rexmts = fq_cl->fcl_stat.fcl_dup_rexmts;
+ fcls->fcls_pkts_compressible = fq_cl->fcl_stat.fcl_pkts_compressible;
+ fcls->fcls_pkts_compressed = fq_cl->fcl_stat.fcl_pkts_compressed;
/* Gather per flow stats */
flowstat_cnt = min((fcls->fcls_newflows_cnt +
fcls->fcls_oldflows_cnt), FQ_IF_MAX_FLOWSTATS);
i = 0;
STAILQ_FOREACH(fq, &fq_cl->fcl_new_flows, fq_actlink) {
- if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt)
+ if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt) {
break;
+ }
/* leave space for a few old flows */
if ((flowstat_cnt - i) < fcls->fcls_oldflows_cnt &&
- i >= (FQ_IF_MAX_FLOWSTATS >> 1))
+ i >= (FQ_IF_MAX_FLOWSTATS >> 1)) {
break;
+ }
fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
i++;
}
STAILQ_FOREACH(fq, &fq_cl->fcl_old_flows, fq_actlink) {
- if (i >= flowstat_cnt)
+ if (i >= flowstat_cnt) {
break;
+ }
fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
i++;
}
VERIFY(i <= flowstat_cnt);
fcls->fcls_flowstats_cnt = i;
- return (0);
+ return 0;
}