X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..e8c3f78193f1895ea514044358b93b1add9322f3:/bsd/net/classq/classq_subr.c

diff --git a/bsd/net/classq/classq_subr.c b/bsd/net/classq/classq_subr.c
index 738c86e23..7c93bc65e 100644
--- a/bsd/net/classq/classq_subr.c
+++ b/bsd/net/classq/classq_subr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -39,31 +39,31 @@
 #include <net/if.h>
 #include <net/net_osdep.h>
 #include <net/classq/classq.h>
-#if CLASSQ_RED
-#include <net/classq/classq_red.h>
-#endif /* CLASSQ_RED */
-#if CLASSQ_RIO
-#include <net/classq/classq_rio.h>
-#endif /* CLASSQ_RIO */
-#if CLASSQ_BLUE
-#include <net/classq/classq_blue.h>
-#endif /* CLASSQ_BLUE */
+#include <pexpert/pexpert.h>
 #include <net/classq/classq_sfb.h>
+#include <net/classq/classq_fq_codel.h>
 #include <net/pktsched/pktsched.h>
+#include <net/pktsched/pktsched_fq_codel.h>
 #include <libkern/libkern.h>
-#if PF_ALTQ
-#include <net/altq/altq.h>
-#endif /* PF_ALTQ */
 
 static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
-    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *,
-    boolean_t);
-static struct mbuf *ifclassq_poll_common(struct ifclassq *,
-    mbuf_svc_class_t, boolean_t);
-static struct mbuf *ifclassq_tbr_dequeue_common(struct ifclassq *, int,
-    mbuf_svc_class_t, boolean_t);
+    u_int32_t, u_int32_t, void **, void **, u_int32_t *, u_int32_t *,
+    boolean_t, classq_pkt_type_t *);
+static void *ifclassq_tbr_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
+    boolean_t, classq_pkt_type_t *);
+
+static u_int64_t ifclassq_target_qdelay = 0;
+SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
+    &ifclassq_target_qdelay, "target queue delay in nanoseconds");
+
+static u_int64_t ifclassq_update_interval = 0;
+SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval,
+    CTLFLAG_RW|CTLFLAG_LOCKED, &ifclassq_update_interval,
+    "update interval in nanoseconds");
+
+static int32_t ifclassq_sched_fq_codel;
 
 void
 classq_init(void)
@@ -72,16 +72,12 @@ classq_init(void)
 	_CASSERT(MBUF_SC_BE == 0);
 	_CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);
 
-#if CLASSQ_RED
-	red_init();
-#endif /* CLASSQ_RED */
-#if CLASSQ_RIO
-	rio_init();
-#endif /* CLASSQ_RIO */
-#if CLASSQ_BLUE
-	blue_init();
-#endif /* CLASSQ_BLUE */
 	sfb_init();
+	fq_codel_scheduler_init();
+
+	if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel,
+	    sizeof (ifclassq_sched_fq_codel)))
+		ifclassq_sched_fq_codel = 1;
 }
 
 int
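The knobs added above are the interesting part of these hunks: net.classq.target_qdelay and net.classq.update_interval take nanosecond overrides (0 means "use the built-in default"), and ifclassq_sched_fq_codel defaults to 1 whenever the fq_codel boot-arg is absent, so FQ-CoDel is opt-out (boot-args fq_codel=0) rather than opt-in. A minimal userland sketch for reading one of the sysctls; sysctlbyname(3) is the standard interface, the program itself is illustrative:

    /* Illustrative only: inspect the override introduced by this diff. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        u_int64_t qdelay = 0;
        size_t len = sizeof (qdelay);

        /* 0 => no override; the kernel falls back to IFQ_TARGET_DELAY */
        if (sysctlbyname("net.classq.target_qdelay", &qdelay, &len,
            NULL, 0) == -1) {
            perror("sysctlbyname");
            return (1);
        }
        printf("target qdelay override: %llu ns\n",
            (unsigned long long)qdelay);
        return (0);
    }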
@@ -95,6 +91,7 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
 		VERIFY(IFCQ_IS_EMPTY(ifq));
 		ifq->ifcq_ifp = ifp;
 		IFCQ_LEN(ifq) = 0;
+		IFCQ_BYTES(ifq) = 0;
 		bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
 		bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));
@@ -115,31 +112,20 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
 			maxlen = if_sndq_maxlen;
 		IFCQ_SET_MAXLEN(ifq, maxlen);
 
+		if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
+		    IFCQ_TARGET_QDELAY(ifq) == 0) {
+			/*
+			 * Choose static queues because the interface has
+			 * maximum queue size set
+			 */
+			sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
+		}
 		ifq->ifcq_sflags = sflags;
 		err = ifclassq_pktsched_setup(ifq);
 		if (err == 0)
 			ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
 	}
-
-#if PF_ALTQ
-	ifq->ifcq_drain = 0;
-	IFCQ_ALTQ(ifq)->altq_ifcq = ifq;
-	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
-
-	if ((ifp->if_eflags & IFEF_TXSTART) &&
-	    ifp->if_output_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED)
-		ALTQ_SET_READY(IFCQ_ALTQ(ifq));
-	else
-		ALTQ_CLEAR_READY(IFCQ_ALTQ(ifq));
-#endif /* PF_ALTQ */
 	IFCQ_UNLOCK(ifq);
-
 	return (err);
 }
@@ -149,24 +135,6 @@ ifclassq_teardown(struct ifnet *ifp)
 	struct ifclassq *ifq = &ifp->if_snd;
 
 	IFCQ_LOCK(ifq);
-#if PF_ALTQ
-	if (ALTQ_IS_READY(IFCQ_ALTQ(ifq))) {
-		if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
-			altq_disable(IFCQ_ALTQ(ifq));
-		if (ALTQ_IS_ATTACHED(IFCQ_ALTQ(ifq)))
-			altq_detach(IFCQ_ALTQ(ifq));
-		IFCQ_ALTQ(ifq)->altq_flags = 0;
-	}
-	ifq->ifcq_drain = 0;
-	IFCQ_ALTQ(ifq)->altq_ifcq = NULL;
-	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
-	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
-#endif /* PF_ALTQ */
 
 	if (IFCQ_IS_READY(ifq)) {
 		if (IFCQ_TBR_IS_ENABLED(ifq)) {
@@ -189,6 +157,7 @@ ifclassq_teardown(struct ifnet *ifp)
 	VERIFY(ifq->ifcq_dequeue_sc == NULL);
 	VERIFY(ifq->ifcq_request == NULL);
 	IFCQ_LEN(ifq) = 0;
+	IFCQ_BYTES(ifq) = 0;
 	IFCQ_MAXLEN(ifq) = 0;
 	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
 	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));
@@ -200,6 +169,7 @@ int
 ifclassq_pktsched_setup(struct ifclassq *ifq)
 {
 	struct ifnet *ifp = ifq->ifcq_ifp;
+	classq_pkt_type_t ptype = QP_MBUF;
 	int err = 0;
 
 	IFCQ_LOCK_ASSERT_HELD(ifq);
@@ -207,13 +177,28 @@ ifclassq_pktsched_setup(struct ifclassq *ifq)
 
 	switch (ifp->if_output_sched_model) {
 	case IFNET_SCHED_MODEL_DRIVER_MANAGED:
-		err = pktsched_setup(ifq, PKTSCHEDT_TCQ, ifq->ifcq_sflags);
+		if (ifclassq_sched_fq_codel != 0) {
+			err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
+			    ifq->ifcq_sflags, ptype);
+		} else {
+			err = pktsched_setup(ifq, PKTSCHEDT_TCQ,
+			    ifq->ifcq_sflags, ptype);
+		}
 		break;
 
 	case IFNET_SCHED_MODEL_NORMAL:
-		err = pktsched_setup(ifq, PKTSCHEDT_QFQ, ifq->ifcq_sflags);
+		if (ifclassq_sched_fq_codel != 0) {
+			err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
+			    ifq->ifcq_sflags, ptype);
+		} else {
+			err = pktsched_setup(ifq, PKTSCHEDT_QFQ,
+			    ifq->ifcq_sflags, ptype);
+		}
+		break;
+
+	case IFNET_SCHED_MODEL_FQ_CODEL:
+		err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
+		    ifq->ifcq_sflags, ptype);
 		break;
-
 	default:
 		VERIFY(0);
 		/* NOTREACHED */
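With the switch above, both existing scheduler models now default to PKTSCHEDT_FQ_CODEL, with TCQ and QFQ kept only as the fq_codel=0 fallbacks, and a driver can request FQ-CoDel outright via the new IFNET_SCHED_MODEL_FQ_CODEL. A sketch of a driver attach path selecting the model; ifnet_set_output_sched_model() is the existing KPI for this, and my_ifp plus the surrounding function are illustrative:

    #include <net/kpi_interface.h>

    /* Illustrative: opt an interface into the new scheduling model. */
    static errno_t
    my_select_sched_model(ifnet_t my_ifp)
    {
        /*
         * NORMAL and DRIVER_MANAGED already land on FQ-CoDel after
         * this change; the explicit model also skips the boot-arg
         * check in ifclassq_pktsched_setup() entirely.
         */
        return (ifnet_set_output_sched_model(my_ifp,
            IFNET_SCHED_MODEL_FQ_CODEL));
    }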
@@ -238,135 +223,185 @@ ifclassq_get_maxlen(struct ifclassq *ifq)
 	return (IFCQ_MAXLEN(ifq));
 }
 
-u_int32_t
-ifclassq_get_len(struct ifclassq *ifq)
+int
+ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
+    u_int32_t *bytes)
 {
-	return (IFCQ_LEN(ifq));
+	int err = 0;
+
+	IFCQ_LOCK(ifq);
+	if (sc == MBUF_SC_UNSPEC) {
+		VERIFY(packets != NULL);
+		*packets = IFCQ_LEN(ifq);
+	} else {
+		VERIFY(MBUF_VALID_SC(sc));
+		VERIFY(packets != NULL && bytes != NULL);
+		IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
+	}
+	IFCQ_UNLOCK(ifq);
+
+	return (err);
+}
+
+inline void
+ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp,
+    void *p, classq_pkt_type_t ptype)
+{
+	if (!IFNET_IS_CELLULAR(ifp))
+		return;
+
+	switch (ptype) {
+	case QP_MBUF: {
+		struct mbuf *m = p;
+		m->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
+		m->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq);
+		m->m_pkthdr.bufstatus_sndbuf = ifp->if_sndbyte_unsent;
+		break;
+	}
+
+
+	default:
+		VERIFY(0);
+		/* NOTREACHED */
+	}
 }
 
 errno_t
-ifclassq_enqueue(struct ifclassq *ifq, struct mbuf *m)
+ifclassq_enqueue(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype,
+    boolean_t *pdrop)
 {
 	errno_t err;
 
-	IFCQ_LOCK_SPIN(ifq);
+	switch (ptype) {
+	case QP_MBUF:
+		IFCQ_LOCK_SPIN(ifq);
+		break;
 
-#if PF_ALTQ
-	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
-		ALTQ_ENQUEUE(IFCQ_ALTQ(ifq), m, err);
-	} else {
-		u_int32_t qlen = IFCQ_LEN(ifq);
-		IFCQ_ENQUEUE(ifq, m, err);
-		if (IFCQ_LEN(ifq) > qlen)
-			ifq->ifcq_drain += (IFCQ_LEN(ifq) - qlen);
+	default:
+		IFCQ_LOCK(ifq);
+		break;
 	}
-#else /* !PF_ALTQ */
-	IFCQ_ENQUEUE(ifq, m, err);
-#endif /* PF_ALTQ */
 
+	IFCQ_ENQUEUE(ifq, p, ptype, err, pdrop);
 	IFCQ_UNLOCK(ifq);
-
 	return (err);
 }
 
 errno_t
-ifclassq_dequeue(struct ifclassq *ifq, u_int32_t limit, struct mbuf **head,
-    struct mbuf **tail, u_int32_t *cnt, u_int32_t *len)
+ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit,
+    u_int32_t byte_limit, void **head, void **tail,
+    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
 {
-	return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, limit, head, tail,
-	    cnt, len, FALSE));
+	return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit,
+	    byte_limit, head, tail, cnt, len, FALSE, ptype));
 }
 
 errno_t
 ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
-    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
-    u_int32_t *len)
+    u_int32_t pkt_limit, u_int32_t byte_limit, void **head, void **tail,
+    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
 {
-	return (ifclassq_dequeue_common(ifq, sc, limit, head, tail,
-	    cnt, len, TRUE));
+	return (ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit,
+	    head, tail, cnt, len, TRUE, ptype));
 }
 
 static errno_t
 ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
-    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
-    u_int32_t *len, boolean_t drvmgt)
+    u_int32_t pkt_limit, u_int32_t byte_limit, void **head,
+    void **tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt,
+    classq_pkt_type_t *ptype)
 {
 	struct ifnet *ifp = ifq->ifcq_ifp;
-	u_int32_t i = 0, l = 0;
-	struct mbuf **first, *last;
-#if PF_ALTQ
-	struct ifaltq *altq = IFCQ_ALTQ(ifq);
-	boolean_t draining;
-#endif /* PF_ALTQ */
+	u_int32_t i = 0, l = 0, lock_spin = 1 ;
+	void **first, *last;
 
 	VERIFY(!drvmgt || MBUF_VALID_SC(sc));
 
-	*head = NULL;
-	first = &(*head);
-	last = NULL;
+	*ptype = 0;
 
-	ifq = &ifp->if_snd;
-	IFCQ_LOCK_SPIN(ifq);
+	if (IFCQ_TBR_IS_ENABLED(ifq))
+		goto dequeue_loop;
 
-	while (i < limit) {
-		u_int64_t pktlen;
-#if PF_ALTQ
-		u_int32_t qlen;
+	/*
+	 * If the scheduler support dequeueing multiple packets at the
+	 * same time, call that one instead.
+	 */
+	if (drvmgt && ifq->ifcq_dequeue_sc_multi != NULL) {
+		int err;
 
-		qlen = IFCQ_LEN(ifq);
-		draining = IFCQ_IS_DRAINING(ifq);
+		if (lock_spin)
+			IFCQ_LOCK_SPIN(ifq);
+		else
+			IFCQ_LOCK(ifq);
 
-		if (drvmgt) {
-			if (IFCQ_TBR_IS_ENABLED(ifq))
-				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
-			else if (draining)
-				IFCQ_DEQUEUE_SC(ifq, sc, *head);
-			else if (ALTQ_IS_ENABLED(altq))
-				ALTQ_DEQUEUE_SC(altq, sc, *head);
-			else
-				*head = NULL;
-		} else {
-			if (IFCQ_TBR_IS_ENABLED(ifq))
-				IFCQ_TBR_DEQUEUE(ifq, *head);
-			else if (draining)
-				IFCQ_DEQUEUE(ifq, *head);
-			else if (ALTQ_IS_ENABLED(altq))
-				ALTQ_DEQUEUE(altq, *head);
-			else
-				*head = NULL;
-		}
+		err = ifq->ifcq_dequeue_sc_multi(ifq, sc, pkt_limit,
+		    byte_limit, head, tail, cnt, len, ptype);
+		IFCQ_UNLOCK(ifq);
 
-		if (draining && *head != NULL) {
-			VERIFY(ifq->ifcq_drain >= (qlen - IFCQ_LEN(ifq)));
-			ifq->ifcq_drain -= (qlen - IFCQ_LEN(ifq));
-		}
-#else /* !PF_ALTQ */
+		if (err == 0 && (*head) == NULL)
+			err = EAGAIN;
+		return (err);
+	} else if (ifq->ifcq_dequeue_multi != NULL) {
+		int err;
+
+		if (lock_spin)
+			IFCQ_LOCK_SPIN(ifq);
+		else
+			IFCQ_LOCK(ifq);
+
+		err = ifq->ifcq_dequeue_multi(ifq, pkt_limit, byte_limit,
+		    head, tail, cnt, len, ptype);
+		IFCQ_UNLOCK(ifq);
+
+		if (err == 0 && (*head) == NULL)
+			err = EAGAIN;
+		return (err);
+	}
+
+dequeue_loop:
+	*head = NULL;
+	first = &(*head);
+	last = NULL;
+
+	if (lock_spin)
+		IFCQ_LOCK_SPIN(ifq);
+	else
+		IFCQ_LOCK(ifq);
+
+	while (i < pkt_limit && l < byte_limit) {
+		classq_pkt_type_t tmp_ptype;
 		if (drvmgt) {
 			if (IFCQ_TBR_IS_ENABLED(ifq))
-				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
+				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
 			else
-				IFCQ_DEQUEUE_SC(ifq, sc, *head);
+				IFCQ_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
 		} else {
 			if (IFCQ_TBR_IS_ENABLED(ifq))
-				IFCQ_TBR_DEQUEUE(ifq, *head);
+				IFCQ_TBR_DEQUEUE(ifq, *head, &tmp_ptype);
 			else
-				IFCQ_DEQUEUE(ifq, *head);
+				IFCQ_DEQUEUE(ifq, *head, &tmp_ptype);
 		}
-#endif /* !PF_ALTQ */
 
 		if (*head == NULL)
 			break;
 
-		(*head)->m_nextpkt = NULL;
-		last = *head;
+		switch (tmp_ptype) {
+		case QP_MBUF:
+			(*((mbuf_t *)head))->m_nextpkt = NULL;
+			last = *head;
+			l += (*((mbuf_t *)head))->m_pkthdr.len;
+			ifclassq_set_packet_metadata(ifq, ifp, (*head),
+			    QP_MBUF);
+			head = (void **)&(*((mbuf_t *)head))->m_nextpkt;
+			break;
 
-		l += (*head)->m_pkthdr.len;
-		pktlen = (*head)->m_pkthdr.len;
-
-		(*head)->m_pkthdr.pf_mtag.pftag_pktseq =
-		    atomic_add_64_ov(&(ifp->if_bw.cur_seq), pktlen);
+		default:
+			VERIFY(0);
+			/* NOTREACHED */
+		}
 
-		head = &(*head)->m_nextpkt;
+		*ptype = tmp_ptype;
 		i++;
 	}
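The contract for drivers changes here: dequeue now takes both a packet and a byte budget, hands back an opaque chain typed by classq_pkt_type_t, and short-circuits to the scheduler's ifcq_dequeue_multi/ifcq_dequeue_sc_multi callbacks when the discipline provides them (EAGAIN still means "nothing queued"). A rough consumer in the shape of a driver start routine; my_start and my_tx_one are hypothetical, while the ifclassq_dequeue() call and the m_nextpkt walk mirror the loop above:

    /* Illustrative driver-side consumer of the batched dequeue API. */
    static void
    my_start(struct ifnet *ifp)
    {
        void *head = NULL, *tail = NULL;
        u_int32_t cnt = 0, len = 0;
        classq_pkt_type_t ptype;

        /* ask for at most 32 packets or 64KB per batch */
        if (ifclassq_dequeue(&ifp->if_snd, 32, 64 * 1024,
            &head, &tail, &cnt, &len, &ptype) != 0)
            return;                     /* EAGAIN: queue empty */

        VERIFY(ptype == QP_MBUF);       /* only mbufs exist at this point */
        while (head != NULL) {
            struct mbuf *m = head;

            head = (void *)m->m_nextpkt;
            m->m_nextpkt = NULL;
            my_tx_one(ifp, m);          /* hypothetical hardware submit */
        }
    }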
@@ -382,89 +417,24 @@ ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
 	return ((*first != NULL) ? 0 : EAGAIN);
 }
 
-struct mbuf *
-ifclassq_poll(struct ifclassq *ifq)
-{
-	return (ifclassq_poll_common(ifq, MBUF_SC_UNSPEC, FALSE));
-}
-
-struct mbuf *
-ifclassq_poll_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
-{
-	return (ifclassq_poll_common(ifq, sc, TRUE));
-}
-
-static struct mbuf *
-ifclassq_poll_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
-    boolean_t drvmgt)
-{
-#if PF_ALTQ
-	struct ifaltq *altq = IFCQ_ALTQ(ifq);
-#endif /* PF_ALTQ */
-	struct mbuf *m;
-
-	VERIFY(!drvmgt || MBUF_VALID_SC(sc));
-
-#if PF_ALTQ
-	if (drvmgt) {
-		if (IFCQ_TBR_IS_ENABLED(ifq))
-			IFCQ_TBR_POLL_SC(ifq, sc, m);
-		else if (IFCQ_IS_DRAINING(ifq))
-			IFCQ_POLL_SC(ifq, sc, m);
-		else if (ALTQ_IS_ENABLED(altq))
-			ALTQ_POLL_SC(altq, sc, m);
-		else
-			m = NULL;
-	} else {
-		if (IFCQ_TBR_IS_ENABLED(ifq))
-			IFCQ_TBR_POLL(ifq, m);
-		else if (IFCQ_IS_DRAINING(ifq))
-			IFCQ_POLL(ifq, m);
-		else if (ALTQ_IS_ENABLED(altq))
-			ALTQ_POLL(altq, m);
-		else
-			m = NULL;
-	}
-#else /* !PF_ALTQ */
-	if (drvmgt) {
-		if (IFCQ_TBR_IS_ENABLED(ifq))
-			IFCQ_TBR_POLL_SC(ifq, sc, m);
-		else
-			IFCQ_POLL_SC(ifq, sc, m);
-	} else {
-		if (IFCQ_TBR_IS_ENABLED(ifq))
-			IFCQ_TBR_POLL(ifq, m);
-		else
-			IFCQ_POLL(ifq, m);
-	}
-#endif /* !PF_ALTQ */
-
-	return (m);
-}
-
 void
 ifclassq_update(struct ifclassq *ifq, cqev_t ev)
 {
 	IFCQ_LOCK_ASSERT_HELD(ifq);
 	VERIFY(IFCQ_IS_READY(ifq));
-
-#if PF_ALTQ
-	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
-		ALTQ_UPDATE(IFCQ_ALTQ(ifq), ev);
-#endif /* PF_ALTQ */
 	IFCQ_UPDATE(ifq, ev);
 }
 
 int
 ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
     ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
-    ifclassq_deq_sc_func dequeue_sc, ifclassq_req_func request)
+    ifclassq_deq_sc_func dequeue_sc, ifclassq_deq_multi_func dequeue_multi,
+    ifclassq_deq_sc_multi_func dequeue_sc_multi, ifclassq_req_func request)
 {
 	IFCQ_LOCK_ASSERT_HELD(ifq);
 
 	VERIFY(ifq->ifcq_disc == NULL);
 	VERIFY(enqueue != NULL);
-	VERIFY(!(dequeue != NULL && dequeue_sc != NULL));
 	VERIFY(request != NULL);
 
 	ifq->ifcq_type = type;
@@ -472,6 +442,8 @@ ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
 	ifq->ifcq_enqueue = enqueue;
 	ifq->ifcq_dequeue = dequeue;
 	ifq->ifcq_dequeue_sc = dequeue_sc;
+	ifq->ifcq_dequeue_multi = dequeue_multi;
+	ifq->ifcq_dequeue_sc_multi = dequeue_sc_multi;
 	ifq->ifcq_request = request;
 
 	return (0);
@@ -539,8 +511,12 @@ ifclassq_ev2str(cqev_t ev)
 	const char *c;
 
 	switch (ev) {
-	case CLASSQ_EV_LINK_SPEED:
-		c = "LINK_SPEED";
+	case CLASSQ_EV_LINK_BANDWIDTH:
+		c = "LINK_BANDWIDTH";
+		break;
+
+	case CLASSQ_EV_LINK_LATENCY:
+		c = "LINK_LATENCY";
 		break;
 
 	case CLASSQ_EV_LINK_MTU:
@@ -574,24 +550,25 @@
 #define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
 #define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
 
-struct mbuf *
-ifclassq_tbr_dequeue(struct ifclassq *ifq, int op)
+void *
+ifclassq_tbr_dequeue(struct ifclassq *ifq, classq_pkt_type_t *ptype)
 {
-	return (ifclassq_tbr_dequeue_common(ifq, op, MBUF_SC_UNSPEC, FALSE));
+	return (ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, ptype));
 }
 
-struct mbuf *
-ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, int op, mbuf_svc_class_t sc)
+void *
+ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
+    classq_pkt_type_t *ptype)
 {
-	return (ifclassq_tbr_dequeue_common(ifq, op, sc, TRUE));
+	return (ifclassq_tbr_dequeue_common(ifq, sc, TRUE, ptype));
 }
 
-static struct mbuf *
-ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
-    mbuf_svc_class_t sc, boolean_t drvmgt)
+static void *
+ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
+    boolean_t drvmgt, classq_pkt_type_t *ptype)
 {
 	struct tb_regulator *tbr;
-	struct mbuf *m;
+	void *p;
 	int64_t interval;
 	u_int64_t now;
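TBR_SCALE()/TBR_UNSCALE() keep tbr_token, tbr_rate and tbr_depth in fixed point with TBR_SHIFT fractional bits, so rates amounting to less than one byte per machclk tick still accumulate instead of rounding to zero. The update rule used by ifclassq_tbr_dequeue_common() below, restated as a standalone sketch (not kernel API); one subtlety worth keeping in mind is that the kernel only refills once the token count has dropped to zero or below:

    /* Sketch of the token-bucket bookkeeping, in TBR fixed point. */
    static int64_t
    tbr_token_after_dequeue(int64_t token, int64_t depth, int64_t rate,
        int64_t interval, u_int32_t pktlen)
    {
        token += interval * rate;       /* refill for elapsed machclk ticks */
        if (token > depth)
            token = depth;              /* the bucket never overflows */

        /* charge the dequeued packet, scaled into fixed point */
        return (token - TBR_SCALE(pktlen));
    }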
@@ -601,64 +578,46 @@ ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
 	VERIFY(IFCQ_TBR_IS_ENABLED(ifq));
 
 	tbr = &ifq->ifcq_tbr;
-	if (op == CLASSQDQ_REMOVE && tbr->tbr_lastop == CLASSQDQ_POLL) {
-		/* if this is a remove after poll, bypass tbr check */
-	} else {
-		/* update token only when it is negative */
-		if (tbr->tbr_token <= 0) {
-			now = read_machclk();
-			interval = now - tbr->tbr_last;
-			if (interval >= tbr->tbr_filluptime) {
+	/* update token only when it is negative */
+	if (tbr->tbr_token <= 0) {
+		now = read_machclk();
+		interval = now - tbr->tbr_last;
+		if (interval >= tbr->tbr_filluptime) {
+			tbr->tbr_token = tbr->tbr_depth;
+		} else {
+			tbr->tbr_token += interval * tbr->tbr_rate;
+			if (tbr->tbr_token > tbr->tbr_depth)
 				tbr->tbr_token = tbr->tbr_depth;
-			} else {
-				tbr->tbr_token += interval * tbr->tbr_rate;
-				if (tbr->tbr_token > tbr->tbr_depth)
-					tbr->tbr_token = tbr->tbr_depth;
-			}
-			tbr->tbr_last = now;
 		}
-		/* if token is still negative, don't allow dequeue */
-		if (tbr->tbr_token <= 0)
-			return (NULL);
+		tbr->tbr_last = now;
 	}
+	/* if token is still negative, don't allow dequeue */
+	if (tbr->tbr_token <= 0)
+		return (NULL);
 
 	/*
 	 * ifclassq takes precedence over ALTQ queue;
 	 * ifcq_drain count is adjusted by the caller.
 	 */
-#if PF_ALTQ
-	if (IFCQ_IS_DRAINING(ifq)) {
-#endif /* PF_ALTQ */
-		if (op == CLASSQDQ_POLL) {
-			if (drvmgt)
-				IFCQ_POLL_SC(ifq, sc, m);
-			else
-				IFCQ_POLL(ifq, m);
-		} else {
-			if (drvmgt)
-				IFCQ_DEQUEUE_SC(ifq, sc, m);
-			else
-				IFCQ_DEQUEUE(ifq, m);
-		}
-#if PF_ALTQ
-	} else {
-		struct ifaltq *altq = IFCQ_ALTQ(ifq);
-		if (ALTQ_IS_ENABLED(altq)) {
-			if (drvmgt)
-				m = (*altq->altq_dequeue_sc)(altq, sc, op);
-			else
-				m = (*altq->altq_dequeue)(altq, op);
-		} else {
-			m = NULL;
+	if (drvmgt)
+		IFCQ_DEQUEUE_SC(ifq, sc, p, ptype);
+	else
+		IFCQ_DEQUEUE(ifq, p, ptype);
+
+	if (p != NULL) {
+		switch (*ptype) {
+		case QP_MBUF:
+			tbr->tbr_token -= TBR_SCALE(m_pktlen((mbuf_t)p));
+			break;
+
+
+		default:
+			VERIFY(0);
+			/* NOTREACHED */
 		}
 	}
-#endif /* PF_ALTQ */
-	if (m != NULL && op == CLASSQDQ_REMOVE)
-		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
-	tbr->tbr_lastop = op;
-
-	return (m);
+	return (p);
 }
 
 /*
@@ -704,7 +663,7 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
 		bzero(tbr, sizeof (*tbr));
 		ifnet_set_start_cycle(ifp, NULL);
 		if (update)
-			ifclassq_update(ifq, CLASSQ_EV_LINK_SPEED);
+			ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
 		return (0);
 	}
@@ -762,7 +721,6 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
 	}
 	tbr->tbr_token = tbr->tbr_depth;
 	tbr->tbr_last = read_machclk();
-	tbr->tbr_lastop = CLASSQDQ_REMOVE;
 	if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
 		struct timespec ts =
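Because the CLASSQDQ_POLL/CLASSQDQ_REMOVE distinction is gone, the regulator above now charges tbr_token on every successful dequeue, and the tbr_lastop bookkeeping is dropped with it. Installing a profile still goes through ifclassq_tbr_set(); a kernel-side sketch, assuming the tb_profile layout (rate in bits per second, percent, depth in bytes) and with the numbers arbitrary:

    /* Illustrative: rate-limit an interface send queue to 100 Mbps. */
    static int
    my_install_tbr(struct ifnet *ifp)
    {
        struct tb_profile tb;
        int err;

        tb.rate = 100 * 1000 * 1000;    /* bits per second */
        tb.percent = 0;                 /* no percent-of-link mode */
        tb.depth = 64 * 1024;           /* bucket depth, in bytes */

        IFCQ_LOCK(&ifp->if_snd);
        err = ifclassq_tbr_set(&ifp->if_snd, &tb, TRUE);
        IFCQ_UNLOCK(&ifp->if_snd);
        return (err);
    }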
@@ -788,7 +746,57 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
 		ifnet_set_start_cycle(ifp, NULL);
 	}
 	if (update && tbr->tbr_rate_raw != old_rate)
-		ifclassq_update(ifq, CLASSQ_EV_LINK_SPEED);
+		ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
 
 	return (0);
 }
+
+void
+ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay)
+{
+	u_int64_t qdelay = 0;
+	qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);
+
+	if (ifclassq_target_qdelay != 0)
+		qdelay = ifclassq_target_qdelay;
+
+	/*
+	 * If we do not know the effective bandwidth, use the default
+	 * target queue delay.
+	 */
+	if (qdelay == 0)
+		qdelay = IFQ_TARGET_DELAY;
+
+	/*
+	 * If a delay has been added to ifnet start callback for
+	 * coalescing, we have to add that to the pre-set target delay
+	 * because the packets can be in the queue longer.
+	 */
+	if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
+	    ifp->if_start_delay_timeout > 0)
+		qdelay += ifp->if_start_delay_timeout;
+
+	*(if_target_qdelay) = qdelay;
+}
+
+void
+ifclassq_calc_update_interval(u_int64_t *update_interval)
+{
+	u_int64_t uint = 0;
+
+	/* If the system level override is set, use it */
+	if (ifclassq_update_interval != 0)
+		uint = ifclassq_update_interval;
+
+	/* Otherwise use the default value */
+	if (uint == 0)
+		uint = IFQ_UPDATE_INTERVAL;
+
+	*update_interval = uint;
+}
+
+void
+ifclassq_reap_caches(boolean_t purge)
+{
+	fq_codel_reap_caches(purge);
+}
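The two calc helpers close the loop on the sysctls declared at the top of the file: the net.classq.target_qdelay override wins when set, otherwise the interface's own IFCQ_TARGET_QDELAY, otherwise IFQ_TARGET_DELAY, with if_start_delay_timeout added on top when enqueue coalescing (IFEF_ENQUEUE_MULTI) can hold packets in the queue longer. A sketch of a scheduler-side consumer; the real caller is the FQ-CoDel setup path, and my_fq_setup is illustrative:

    /* Illustrative consumer of the delay/interval helpers above. */
    static void
    my_fq_setup(struct ifnet *ifp)
    {
        u_int64_t target_qdelay, update_interval;

        ifclassq_calc_target_qdelay(ifp, &target_qdelay);
        ifclassq_calc_update_interval(&update_interval);

        printf("fq: target %llu ns, update interval %llu ns\n",
            (unsigned long long)target_qdelay,
            (unsigned long long)update_interval);
    }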