/*
 * Copyright (c) 2011-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/net_osdep.h>
#include <net/classq/classq.h>
#include <pexpert/pexpert.h>
#include <net/classq/classq_sfb.h>
#include <net/classq/classq_fq_codel.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/flowadv.h>

#include <libkern/libkern.h>
static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
    u_int32_t *, boolean_t);
static void ifclassq_tbr_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    boolean_t, classq_pkt_t *);
static u_int64_t ifclassq_target_qdelay = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ifclassq_target_qdelay, "target queue delay in nanoseconds");

static u_int64_t ifclassq_update_interval = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_update_interval,
    "update interval in nanoseconds");

#if DEBUG || DEVELOPMENT
uint32_t ifclassq_flow_control_adv = 1; /* flow control advisory */
SYSCTL_UINT(_net_classq, OID_AUTO, flow_control_adv,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_flow_control_adv, 1,
    "enable/disable flow control advisory");

uint16_t fq_codel_quantum = 0;
#endif /* DEBUG || DEVELOPMENT */
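/*
 * Boot-time initialization: the _CASSERTs below pin the service class
 * numbering that the scheduler code relies on, and on DEBUG/DEVELOPMENT
 * kernels the fq_codel quantum, target queue delay and update interval
 * defaults may be overridden via boot-args.
 */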
    _CASSERT(MBUF_TC_BE == 0);
    _CASSERT(MBUF_SC_BE == 0);
    _CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);
#if DEBUG || DEVELOPMENT
    PE_parse_boot_argn("fq_codel_quantum", &fq_codel_quantum,
        sizeof(fq_codel_quantum));
    PE_parse_boot_argn("ifclassq_target_qdelay", &ifclassq_target_qdelay,
        sizeof(ifclassq_target_qdelay));
    PE_parse_boot_argn("ifclassq_update_interval",
        &ifclassq_update_interval, sizeof(ifclassq_update_interval));
#endif /* DEBUG || DEVELOPMENT */
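/*
 * ifclassq_setup() prepares the interface send queue (ifp->if_snd) for a
 * driver using the new output model (IFEF_TXSTART): the queue must start
 * out empty and unconfigured, its length defaults to if_sndq_maxlen, the
 * delay-based queueing flag is cleared when the driver pinned its own
 * queue length without also setting a target delay, and the packet
 * scheduler is attached before the queue is marked ready and enabled.
 */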
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
    struct ifclassq *ifq = &ifp->if_snd;

    VERIFY(IFCQ_IS_EMPTY(ifq));

    bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt));

    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);

    if (ifp->if_eflags & IFEF_TXSTART) {
        u_int32_t maxlen = 0;

        if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) {
            maxlen = if_sndq_maxlen;
        }
        IFCQ_SET_MAXLEN(ifq, maxlen);

        if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
            IFCQ_TARGET_QDELAY(ifq) == 0) {
            /*
             * Choose static queues because the interface has
             * maximum queue size set
             */
            sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
        }
        ifq->ifcq_sflags = sflags;
        err = ifclassq_pktsched_setup(ifq);
        if (err == 0) {
            ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
        }
    }
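/*
 * ifclassq_teardown() undoes ifclassq_setup(): if the queue was ready, any
 * token bucket regulator is removed by installing a zero-rate profile and
 * the scheduler is torn down; the flags, maximum length and counters are
 * then cleared, and the queue is verified to be back in its pristine state.
 */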
ifclassq_teardown(struct ifnet *ifp)
    struct ifclassq *ifq = &ifp->if_snd;

    if (IFCQ_IS_READY(ifq)) {
        if (IFCQ_TBR_IS_ENABLED(ifq)) {
            struct tb_profile tb = { .rate = 0, .percent = 0, .depth = 0 };
            (void) ifclassq_tbr_set(ifq, &tb, FALSE);
        }
        pktsched_teardown(ifq);
    }
    ifq->ifcq_sflags = 0;

    VERIFY(IFCQ_IS_EMPTY(ifq));
    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);

    IFCQ_MAXLEN(ifq) = 0;
    bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt));
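/*
 * ifclassq_pktsched_setup(): FQ-CoDel (PKTSCHEDT_FQ_CODEL) is the only
 * scheduler configured here, operating on mbuf packets (QP_MBUF) with the
 * scheduler flags previously captured in ifcq_sflags.
 */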
ifclassq_pktsched_setup(struct ifclassq *ifq)
    struct ifnet *ifp = ifq->ifcq_ifp;
    classq_pkt_type_t ptype = QP_MBUF;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(ifp->if_eflags & IFEF_TXSTART);

    err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL, ifq->ifcq_sflags, ptype);
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
        maxqlen = if_sndq_maxlen;
    IFCQ_SET_MAXLEN(ifq, maxqlen);

ifclassq_get_maxlen(struct ifclassq *ifq)
    return IFCQ_MAXLEN(ifq);
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
    if (sc == MBUF_SC_UNSPEC) {
        VERIFY(packets != NULL);
        *packets = IFCQ_LEN(ifq);
    } else {
        cqrq_stat_sc_t req = { sc, 0, 0 };

        VERIFY(MBUF_VALID_SC(sc));
        VERIFY(packets != NULL && bytes != NULL);

        err = fq_if_request_classq(ifq, CLASSQRQ_STAT_SC, &req);
        if (packets != NULL) {
            *packets = req.packets;
        }
ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp,
    classq_pkt_t *p)
    if (!IFNET_IS_CELLULAR(ifp)) {
        return;
    }

    switch (p->cp_ptype) {
    case QP_MBUF: {
        struct mbuf *m = p->cp_mbuf;
        m->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
        m->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq);
        m->m_pkthdr.bufstatus_sndbuf = (uint32_t)ifp->if_sndbyte_unsent;

        __builtin_unreachable();
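/*
 * Enqueue/dequeue entry points. Everything funnels into the FQ-CoDel
 * classq (fq_if_*); ifclassq_dequeue() and ifclassq_dequeue_sc() differ
 * only in whether a specific service class is requested, which is what
 * the drvmgt argument to ifclassq_dequeue_common() conveys.
 */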
ifclassq_enqueue(struct ifclassq *ifq, classq_pkt_t *head, classq_pkt_t *tail,
    u_int32_t cnt, u_int32_t bytes, boolean_t *pdrop)
    return fq_if_enqueue_classq(ifq, head, tail, cnt, bytes, pdrop);
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit,
    u_int32_t byte_limit, classq_pkt_t *head, classq_pkt_t *tail,
    u_int32_t *cnt, u_int32_t *len)
    return ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit,
        byte_limit, head, tail, cnt, len, FALSE);
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, classq_pkt_t *head,
    classq_pkt_t *tail, u_int32_t *cnt, u_int32_t *len)
    return ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit,
        head, tail, cnt, len, TRUE);
ifclassq_dequeue_common_default(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, classq_pkt_t *head,
    classq_pkt_t *tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt)
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int32_t i = 0, l = 0;
    classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
    classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));

    if (IFCQ_TBR_IS_ENABLED(ifq)) {

    /*
     * If the scheduler supports dequeueing multiple packets at the
     * same time, call that one instead.
     */

        err = fq_if_dequeue_sc_classq_multi(ifq, sc, pkt_limit,
            byte_limit, head, tail, cnt, len);

        if (err == 0 && head->cp_mbuf == NULL) {

        err = fq_if_dequeue_classq_multi(ifq, pkt_limit, byte_limit,
            head, tail, cnt, len);

        if (err == 0 && head->cp_mbuf == NULL) {

    while (i < pkt_limit && l < byte_limit) {

        if (IFCQ_TBR_IS_ENABLED(ifq)) {
            IFCQ_TBR_DEQUEUE_SC(ifq, sc, head);
        } else {
            fq_if_dequeue_sc_classq(ifq, sc, head);
        }

        if (IFCQ_TBR_IS_ENABLED(ifq)) {
            IFCQ_TBR_DEQUEUE(ifq, head);
        } else {
            fq_if_dequeue_classq(ifq, head);
        }

        if (head->cp_mbuf == NULL) {

        if (first.cp_mbuf == NULL) {

        switch (head->cp_ptype) {
        case QP_MBUF:
            head->cp_mbuf->m_nextpkt = NULL;
            l += head->cp_mbuf->m_pkthdr.len;
            ifclassq_set_packet_metadata(ifq, ifp, head);
            if (last.cp_mbuf != NULL) {
                last.cp_mbuf->m_nextpkt = head->cp_mbuf;
            }

            __builtin_unreachable();

    return (first.cp_mbuf != NULL) ? 0 : EAGAIN;
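/*
 * ifclassq_dequeue_common() simply forwards to the default implementation
 * above.
 */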
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, classq_pkt_t *head,
    classq_pkt_t *tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt)
    return ifclassq_dequeue_common_default(ifq, sc,
        pkt_limit, byte_limit, head, tail, cnt, len, drvmgt);
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));
    fq_if_request_classq(ifq, CLASSQRQ_EVENT, (void *)ev);
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline)
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);

    ifq->ifcq_type = type;
    ifq->ifcq_disc = discipline;
ifclassq_detach(struct ifclassq *ifq)
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);

    ifq->ifcq_type = PKTSCHEDT_NONE;
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
    struct if_ifclassq_stats *ifqs;

    if (*nbytes < sizeof(*ifqs)) {

    ifqs = _MALLOC(sizeof(*ifqs), M_TEMP, M_WAITOK | M_ZERO);

    if (!IFCQ_IS_READY(ifq)) {

    ifqs->ifqs_len = IFCQ_LEN(ifq);
    ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
    *(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
    *(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
    ifqs->ifqs_scheduler = ifq->ifcq_type;

    err = pktsched_getqstats(ifq, qid, ifqs);

    if (err == 0 && (err = copyout((caddr_t)ifqs,
        (user_addr_t)(uintptr_t)ubuf, sizeof(*ifqs))) == 0) {
        *nbytes = sizeof(*ifqs);
ifclassq_ev2str(cqev_t ev)
    case CLASSQ_EV_LINK_BANDWIDTH:
        c = "LINK_BANDWIDTH";

    case CLASSQ_EV_LINK_LATENCY:

    case CLASSQ_EV_LINK_MTU:

    case CLASSQ_EV_LINK_UP:

    case CLASSQ_EV_LINK_DOWN:
/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 */
#define TBR_SHIFT	32
#define TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
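/*
 * Illustrative example of this Q32 fixed-point representation (numbers are
 * not from the original sources): with a profile rate of 80 Mbit/s,
 * rate / 8 is 10,000,000 bytes/sec; if machclk_freq were also 10,000,000
 * ticks/sec, then tbr_rate as computed in ifclassq_tbr_set() below would be
 * TBR_SCALE(10000000) / 10000000 = 1 << 32, i.e. exactly one byte of credit
 * per machine-clock tick in scaled form, and a 1500-byte packet debits
 * TBR_SCALE(1500) from tbr_token.
 */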
ifclassq_tbr_dequeue(struct ifclassq *ifq, classq_pkt_t *pkt)
    ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, pkt);

ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    classq_pkt_t *pkt)
    ifclassq_tbr_dequeue_common(ifq, sc, TRUE, pkt);
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt, classq_pkt_t *pkt)
    struct tb_regulator *tbr;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));
    VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

    *pkt = CLASSQ_PKT_INITIALIZER(*pkt);
    tbr = &ifq->ifcq_tbr;
    /* update token only when it is negative */
    if (tbr->tbr_token <= 0) {
        now = read_machclk();
        interval = now - tbr->tbr_last;
        if (interval >= tbr->tbr_filluptime) {
            tbr->tbr_token = tbr->tbr_depth;
        } else {
            tbr->tbr_token += interval * tbr->tbr_rate;
            if (tbr->tbr_token > tbr->tbr_depth) {
                tbr->tbr_token = tbr->tbr_depth;
            }
        }

    /* if token is still negative, don't allow dequeue */
    if (tbr->tbr_token <= 0) {

    /*
     * ifclassq takes precedence over ALTQ queue;
     * ifcq_drain count is adjusted by the caller.
     */
    if (drvmgt) {
        fq_if_dequeue_sc_classq(ifq, sc, pkt);
    } else {
        fq_if_dequeue_classq(ifq, pkt);
    }

    if (pkt->cp_mbuf != NULL) {
        switch (pkt->cp_ptype) {
        case QP_MBUF:
            tbr->tbr_token -= TBR_SCALE(m_pktlen(pkt->cp_mbuf));
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
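/*
 * As used below, profile->rate is expressed in bits per second (it is
 * divided by 8 before scaling) and profile->percent, when non-zero, derives
 * the rate from the interface's effective output bandwidth;
 * ifclassq_teardown() above shows the deletion path, passing a profile with
 * a zero rate.
 */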
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
    struct tb_regulator *tbr;
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int64_t rate, old_rate;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));

    VERIFY(machclk_freq != 0);

    tbr = &ifq->ifcq_tbr;
    old_rate = tbr->tbr_rate_raw;

    rate = profile->rate;
    if (profile->percent > 0) {

        if (profile->percent > 100) {

        if ((eff_rate = ifp->if_output_bw.eff_bw) == 0) {

        rate = (eff_rate * profile->percent) / 100;

        if (!IFCQ_TBR_IS_ENABLED(ifq)) {

        if (pktsched_verbose) {
            printf("%s: TBR disabled\n", if_name(ifp));
        }

        /* disable this TBR */
        ifq->ifcq_flags &= ~IFCQF_TBR;
        bzero(tbr, sizeof(*tbr));
        ifnet_set_start_cycle(ifp, NULL);

        ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);

    if (pktsched_verbose) {
        printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
            (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
            "enabled", rate, profile->depth);
    }

    /* set the new TBR */
    bzero(tbr, sizeof(*tbr));
    tbr->tbr_rate_raw = rate;
    tbr->tbr_percent = profile->percent;
    ifq->ifcq_flags |= IFCQF_TBR;

    /*
     * Note that the TBR fill up time (hence the ifnet restart time)
     * is directly related to the specified TBR depth. The ideal
     * depth value should be computed such that the interval time
     * between each successive wakeup is adequately spaced apart,
     * in order to reduce scheduling overheads. A target interval
     * of 10 ms seems to provide good performance balance. This can be
     * overridden by specifying the depth profile. Values smaller than
     * the ideal depth will reduce delay at the expense of CPU cycles.
     */
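/*
 * Illustrative numbers (not from the original sources): at 100 Mbit/s the
 * interface drains 12,500,000 bytes/sec, so a 10 ms wakeup interval
 * corresponds to roughly 125,000 bytes of credit; with a 1500-byte MTU the
 * loop below stops at about 84 MTU-sized packets, and the computed depth is
 * then padded by idepth >> 3, i.e. about 12.5%, to get closer to the target
 * rate.
 */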
    tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
    if (tbr->tbr_rate > 0) {
        u_int32_t mtu = ifp->if_mtu;
        int64_t ival, idepth = 0;

        if (mtu < IF_MINMTU) {

        ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

            idepth = TBR_SCALE(i * mtu);
            if ((idepth / tbr->tbr_rate) > ival) {

        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_depth == 0) {
            tbr->tbr_filluptime = idepth / tbr->tbr_rate;
            /* a little fudge factor to get closer to rate */
            tbr->tbr_depth = idepth + (idepth >> 3);
        } else {
            tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        }
    } else {
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        tbr->tbr_filluptime = 0xffffffffffffffffLL;
    }
    tbr->tbr_token = tbr->tbr_depth;
    tbr->tbr_last = read_machclk();

    if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
        struct timespec ts =
            { 0, (long)pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
        if (pktsched_verbose) {
            printf("%s: TBR calculated tokens %lld "
                "filluptime %llu ns\n", if_name(ifp),
                TBR_UNSCALE(tbr->tbr_token),
                pktsched_abs_to_nsecs(tbr->tbr_filluptime));
        }
        ifnet_set_start_cycle(ifp, &ts);
    } else {
        if (pktsched_verbose) {
            if (tbr->tbr_rate == 0) {
                printf("%s: TBR calculated tokens %lld "
                    "infinite filluptime\n", if_name(ifp),
                    TBR_UNSCALE(tbr->tbr_token));
            } else if (!(ifp->if_flags & IFF_UP)) {
                printf("%s: TBR suspended (link is down)\n",

        ifnet_set_start_cycle(ifp, NULL);
    }

    if (update && tbr->tbr_rate_raw != old_rate) {
        ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
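/*
 * Target queue delay and update interval resolution: the per-interface
 * value is used unless the net.classq sysctl override is set; when neither
 * yields a value, the defaults (IFQ_TARGET_DELAY, IFQ_UPDATE_INTERVAL) are
 * used, and any start-callback coalescing delay (IFEF_ENQUEUE_MULTI) is
 * added on top of the target delay.
 */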
ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay)
    u_int64_t qdelay = 0;
    qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

    if (ifclassq_target_qdelay != 0) {
        qdelay = ifclassq_target_qdelay;
    }

    /*
     * If we do not know the effective bandwidth, use the default
     * target queue delay.
     */
        qdelay = IFQ_TARGET_DELAY;

    /*
     * If a delay has been added to ifnet start callback for
     * coalescing, we have to add that to the pre-set target delay
     * because the packets can be in the queue longer.
     */
    if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
        ifp->if_start_delay_timeout > 0) {
        qdelay += ifp->if_start_delay_timeout;
    }

    *(if_target_qdelay) = qdelay;
ifclassq_calc_update_interval(u_int64_t *update_interval)
    /* If the system level override is set, use it */
    if (ifclassq_update_interval != 0) {
        uint = ifclassq_update_interval;
    }

    /* Otherwise use the default value */
        uint = IFQ_UPDATE_INTERVAL;

    *update_interval = uint;
ifclassq_reap_caches(boolean_t purge)
    fq_codel_reap_caches(purge);
    flowadv_reap_caches(purge);