/*
 * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/net_osdep.h>
#include <net/classq/classq.h>
#include <pexpert/pexpert.h>
#include <net/classq/classq_sfb.h>
#include <net/classq/classq_fq_codel.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>

#include <libkern/libkern.h>
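
/*
 * Common support routines for the interface output ("send") classq:
 * binding a packet scheduler to an ifnet's if_snd queue, the
 * enqueue/dequeue entry points used by the transmit start path, and an
 * optional token bucket regulator (TBR) for output rate limiting.
 */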
static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, u_int32_t, void **, void **, u_int32_t *, u_int32_t *,
    boolean_t, classq_pkt_type_t *);
static void *ifclassq_tbr_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    boolean_t, classq_pkt_type_t *);
static u_int64_t ifclassq_target_qdelay = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ifclassq_target_qdelay, "target queue delay in nanoseconds");

static u_int64_t ifclassq_update_interval = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_update_interval,
    "update interval in nanoseconds");
static int32_t ifclassq_sched_fq_codel;
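
/*
 * One-time classq initialization: sanity-check the service-class
 * constants at compile time, initialize the FQ-CoDel scheduler, and
 * honor the "fq_codel" boot-arg (FQ-CoDel is selected by default when
 * the boot-arg is absent).
 */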
void
ifclassq_init(void)
{
    _CASSERT(MBUF_TC_BE == 0);
    _CASSERT(MBUF_SC_BE == 0);
    _CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);

    fq_codel_scheduler_init();

    if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel,
        sizeof(ifclassq_sched_fq_codel))) {
        ifclassq_sched_fq_codel = 1;
    }
}
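
/*
 * Attach a packet scheduler to ifp->if_snd.  The send queue must be
 * empty and unconfigured on entry; for interfaces that use the transmit
 * start model (IFEF_TXSTART) this also sizes the queue and selects the
 * scheduling algorithm via ifclassq_pktsched_setup().
 */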
int
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
{
#pragma unused(reuse)
    struct ifclassq *ifq = &ifp->if_snd;
    int err = 0;

    IFCQ_LOCK(ifq);
    VERIFY(IFCQ_IS_EMPTY(ifq));
    ifq->ifcq_ifp = ifp;
    bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt));

    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(ifq->ifcq_enqueue == NULL);
    VERIFY(ifq->ifcq_dequeue == NULL);
    VERIFY(ifq->ifcq_dequeue_sc == NULL);
    VERIFY(ifq->ifcq_request == NULL);

    if (ifp->if_eflags & IFEF_TXSTART) {
        u_int32_t maxlen = 0;

        if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) {
            maxlen = if_sndq_maxlen;
        }
        IFCQ_SET_MAXLEN(ifq, maxlen);

        if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
            IFCQ_TARGET_QDELAY(ifq) == 0) {
            /*
             * Choose static queues because the interface has
             * maximum queue size set
             */
            sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
        }
        ifq->ifcq_sflags = sflags;
        err = ifclassq_pktsched_setup(ifq);
        if (err == 0) {
            ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
        }
    }
    IFCQ_UNLOCK(ifq);

    return err;
}
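
/*
 * Detach the packet scheduler from ifp->if_snd and return the send
 * queue to its unconfigured state: disable any active token bucket
 * regulator, tear down the scheduler, and clear the queue limits and
 * statistics.
 */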
void
ifclassq_teardown(struct ifnet *ifp)
{
    struct ifclassq *ifq = &ifp->if_snd;

    IFCQ_LOCK(ifq);

    if (IFCQ_IS_READY(ifq)) {
        if (IFCQ_TBR_IS_ENABLED(ifq)) {
            struct tb_profile tb = { 0, 0, 0 };
            (void) ifclassq_tbr_set(ifq, &tb, FALSE);
        }
        (void) pktsched_teardown(ifq);
        ifq->ifcq_flags = 0;
    }
    ifq->ifcq_sflags = 0;

    VERIFY(IFCQ_IS_EMPTY(ifq));
    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(ifq->ifcq_enqueue == NULL);
    VERIFY(ifq->ifcq_dequeue == NULL);
    VERIFY(ifq->ifcq_dequeue_sc == NULL);
    VERIFY(ifq->ifcq_request == NULL);

    IFCQ_MAXLEN(ifq) = 0;
    bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt));

    IFCQ_UNLOCK(ifq);
}
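
/*
 * Instantiate the scheduler for the interface's output model.  FQ-CoDel
 * is used whenever ifclassq_sched_fq_codel is set; otherwise the
 * driver-managed model falls back to TCQ and the normal model to QFQ.
 */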
int
ifclassq_pktsched_setup(struct ifclassq *ifq)
{
    struct ifnet *ifp = ifq->ifcq_ifp;
    classq_pkt_type_t ptype = QP_MBUF;
    int err = 0;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(ifp->if_eflags & IFEF_TXSTART);

    switch (ifp->if_output_sched_model) {
    case IFNET_SCHED_MODEL_DRIVER_MANAGED:
        if (ifclassq_sched_fq_codel != 0) {
            err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
                ifq->ifcq_sflags, ptype);
        } else {
            err = pktsched_setup(ifq, PKTSCHEDT_TCQ,
                ifq->ifcq_sflags, ptype);
        }
        break;

    case IFNET_SCHED_MODEL_NORMAL:
        if (ifclassq_sched_fq_codel != 0) {
            err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
                ifq->ifcq_sflags, ptype);
        } else {
            err = pktsched_setup(ifq, PKTSCHEDT_QFQ,
                ifq->ifcq_sflags, ptype);
        }
        break;

    case IFNET_SCHED_MODEL_FQ_CODEL:
        err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
            ifq->ifcq_sflags, ptype);
        break;

    default:
        VERIFY(0);
        /* NOTREACHED */
    }

    return err;
}
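
/*
 * Accessors for the send queue limit and current length.  A maxqlen of
 * zero selects the system default (if_sndq_maxlen); ifclassq_get_len()
 * reports either the aggregate packet count or, for a specific service
 * class, both packets and bytes.
 */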
void
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
{
    IFCQ_LOCK(ifq);
    if (maxqlen == 0) {
        maxqlen = if_sndq_maxlen;
    }
    IFCQ_SET_MAXLEN(ifq, maxqlen);
    IFCQ_UNLOCK(ifq);
}

u_int32_t
ifclassq_get_maxlen(struct ifclassq *ifq)
{
    return IFCQ_MAXLEN(ifq);
}
int
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
{
    int err = 0;

    IFCQ_LOCK(ifq);
    if (sc == MBUF_SC_UNSPEC) {
        VERIFY(packets != NULL);
        *packets = IFCQ_LEN(ifq);
    } else {
        VERIFY(MBUF_VALID_SC(sc));
        VERIFY(packets != NULL && bytes != NULL);
        IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
    }
    IFCQ_UNLOCK(ifq);

    return err;
}
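
/*
 * On cellular interfaces, stamp each dequeued mbuf with the current
 * interface queue byte count (bufstatus_if) and the interface's unsent
 * socket-buffer bytes (bufstatus_sndbuf), and mark the data as valid
 * via PKTF_VALID_UNSENT_DATA.
 */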
static void
ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp,
    void *p, classq_pkt_type_t ptype)
{
    if (!IFNET_IS_CELLULAR(ifp)) {
        return;
    }

    switch (ptype) {
    case QP_MBUF: {
        struct mbuf *m = p;
        m->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
        m->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq);
        m->m_pkthdr.bufstatus_sndbuf = ifp->if_sndbyte_unsent;
        break;
    }

    default:
        VERIFY(0);
        /* NOTREACHED */
    }
}
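
/*
 * Enqueue a packet on the interface send queue; the scheduler decides
 * whether to accept or drop it, returning its error code and reporting
 * a drop through *pdrop.
 */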
errno_t
ifclassq_enqueue(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype,
    boolean_t *pdrop)
{
    errno_t err;

    IFCQ_LOCK(ifq);
    IFCQ_ENQUEUE(ifq, p, ptype, err, pdrop);
    IFCQ_UNLOCK(ifq);

    return err;
}
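
/*
 * Dequeue entry points.  ifclassq_dequeue() services the aggregate
 * queue while ifclassq_dequeue_sc() restricts the dequeue to a single
 * service class; both bound the batch by packet and byte limits and
 * funnel into ifclassq_dequeue_common().
 */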
errno_t
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit,
    u_int32_t byte_limit, void **head, void **tail,
    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
{
    return ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit,
        byte_limit, head, tail, cnt, len, FALSE, ptype);
}
errno_t
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, void **head, void **tail,
    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
{
    return ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit,
        head, tail, cnt, len, TRUE, ptype);
}
static errno_t
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, void **head,
    void **tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt,
    classq_pkt_type_t *ptype)
{
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int32_t i = 0, l = 0, lock_spin = 1;
    void **first, *last;

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));

    if (IFCQ_TBR_IS_ENABLED(ifq)) {
        goto dequeue_loop;
    }

    /*
     * If the scheduler supports dequeueing multiple packets at the
     * same time, call that one instead.
     */
    if (drvmgt && ifq->ifcq_dequeue_sc_multi != NULL) {
        int err;

        if (lock_spin) {
            IFCQ_LOCK_SPIN(ifq);
        } else {
            IFCQ_LOCK(ifq);
        }
        err = ifq->ifcq_dequeue_sc_multi(ifq, sc, pkt_limit,
            byte_limit, head, tail, cnt, len, ptype);
        IFCQ_UNLOCK(ifq);

        if (err == 0 && (*head) == NULL) {
            err = EAGAIN;
        }
        return err;
    } else if (ifq->ifcq_dequeue_multi != NULL) {
        int err;

        if (lock_spin) {
            IFCQ_LOCK_SPIN(ifq);
        } else {
            IFCQ_LOCK(ifq);
        }
        err = ifq->ifcq_dequeue_multi(ifq, pkt_limit, byte_limit,
            head, tail, cnt, len, ptype);
        IFCQ_UNLOCK(ifq);

        if (err == 0 && (*head) == NULL) {
            err = EAGAIN;
        }
        return err;
    }

dequeue_loop:
    *head = NULL;
    first = &(*head);
    last = NULL;

    if (lock_spin) {
        IFCQ_LOCK_SPIN(ifq);
    } else {
        IFCQ_LOCK(ifq);
    }

    while (i < pkt_limit && l < byte_limit) {
        classq_pkt_type_t tmp_ptype;

        if (drvmgt) {
            if (IFCQ_TBR_IS_ENABLED(ifq)) {
                IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
            } else {
                IFCQ_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
            }
        } else {
            if (IFCQ_TBR_IS_ENABLED(ifq)) {
                IFCQ_TBR_DEQUEUE(ifq, *head, &tmp_ptype);
            } else {
                IFCQ_DEQUEUE(ifq, *head, &tmp_ptype);
            }
        }

        if (*head == NULL) {
            break;
        }

        switch (tmp_ptype) {
        case QP_MBUF:
            (*((mbuf_t *)head))->m_nextpkt = NULL;
            last = *head;
            l += (*((mbuf_t *)head))->m_pkthdr.len;
            ifclassq_set_packet_metadata(ifq, ifp, (*head),
                QP_MBUF);
            head = (void **)&(*((mbuf_t *)head))->m_nextpkt;
            break;

        default:
            VERIFY(0);
            /* NOTREACHED */
        }

        *ptype = tmp_ptype;
        i++;
    }

    IFCQ_UNLOCK(ifq);

    if (tail != NULL) {
        *tail = last;
    }
    if (cnt != NULL) {
        *cnt = i;
    }
    if (len != NULL) {
        *len = l;
    }

    return (*first != NULL) ? 0 : EAGAIN;
}
void
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));
    IFCQ_UPDATE(ifq, ev);
}
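
/*
 * ifclassq_attach()/ifclassq_detach() install and remove the scheduler
 * callbacks (enqueue, dequeue, dequeue_sc, the multi-packet variants
 * and the request handler) on an ifclassq; both expect the ifclassq
 * lock to be held by the caller.
 */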
int
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
    ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
    ifclassq_deq_sc_func dequeue_sc, ifclassq_deq_multi_func dequeue_multi,
    ifclassq_deq_sc_multi_func dequeue_sc_multi, ifclassq_req_func request)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(enqueue != NULL);
    VERIFY(request != NULL);

    ifq->ifcq_type = type;
    ifq->ifcq_disc = discipline;
    ifq->ifcq_enqueue = enqueue;
    ifq->ifcq_dequeue = dequeue;
    ifq->ifcq_dequeue_sc = dequeue_sc;
    ifq->ifcq_dequeue_multi = dequeue_multi;
    ifq->ifcq_dequeue_sc_multi = dequeue_sc_multi;
    ifq->ifcq_request = request;

    return 0;
}
int
ifclassq_detach(struct ifclassq *ifq)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);

    ifq->ifcq_type = PKTSCHEDT_NONE;
    ifq->ifcq_disc = NULL;
    ifq->ifcq_enqueue = NULL;
    ifq->ifcq_dequeue = NULL;
    ifq->ifcq_dequeue_sc = NULL;
    ifq->ifcq_request = NULL;

    return 0;
}
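
/*
 * Copy the queue statistics for the given queue id out to user space:
 * queue length and limit, transmit/drop counters, the scheduler type,
 * and the per-scheduler stats filled in by pktsched_getqstats().
 */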
int
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
{
    struct if_ifclassq_stats *ifqs;
    int err;

    if (*nbytes < sizeof(*ifqs)) {
        return EINVAL;
    }

    ifqs = _MALLOC(sizeof(*ifqs), M_TEMP, M_WAITOK | M_ZERO);
    if (ifqs == NULL) {
        return ENOMEM;
    }

    IFCQ_LOCK(ifq);
    if (!IFCQ_IS_READY(ifq)) {
        IFCQ_UNLOCK(ifq);
        _FREE(ifqs, M_TEMP);
        return ENXIO;
    }

    ifqs->ifqs_len = IFCQ_LEN(ifq);
    ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
    *(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
    *(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
    ifqs->ifqs_scheduler = ifq->ifcq_type;

    err = pktsched_getqstats(ifq, qid, ifqs);
    IFCQ_UNLOCK(ifq);

    if (err == 0 && (err = copyout((caddr_t)ifqs,
        (user_addr_t)(uintptr_t)ubuf, sizeof(*ifqs))) == 0) {
        *nbytes = sizeof(*ifqs);
    }

    _FREE(ifqs, M_TEMP);

    return err;
}
const char *
ifclassq_ev2str(cqev_t ev)
{
    const char *c;

    switch (ev) {
    case CLASSQ_EV_LINK_BANDWIDTH:
        c = "LINK_BANDWIDTH";
        break;
    case CLASSQ_EV_LINK_LATENCY:
        c = "LINK_LATENCY";
        break;
    case CLASSQ_EV_LINK_MTU:
        c = "LINK_MTU";
        break;
    case CLASSQ_EV_LINK_UP:
        c = "LINK_UP";
        break;
    case CLASSQ_EV_LINK_DOWN:
        c = "LINK_DOWN";
        break;
    default:
        c = "UNKNOWN";
        break;
    }

    return c;
}
/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 */
#define TBR_SHIFT	32
#define TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
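
/*
 * Illustration of the fixed-point math (hypothetical numbers): for a
 * 1 Gbps profile, rate / 8 is 125,000,000 bytes/sec; assuming a machine
 * clock of 24 MHz, tbr_rate = TBR_SCALE(125000000) / 24000000, which
 * unscales (TBR_UNSCALE) to roughly 5.2 bytes of credit per clock tick.
 * Each dequeue debits TBR_SCALE(pktlen) from tbr_token.
 */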
void *
ifclassq_tbr_dequeue(struct ifclassq *ifq, classq_pkt_type_t *ptype)
{
    return ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, ptype);
}
void *
ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    classq_pkt_type_t *ptype)
{
    return ifclassq_tbr_dequeue_common(ifq, sc, TRUE, ptype);
}
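
/*
 * Token-bucket-regulated dequeue: replenish the token count from the
 * machine clock, refuse to dequeue while the bucket has no credit, and
 * charge the dequeued packet's length against the bucket.
 */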
static void *
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt, classq_pkt_type_t *ptype)
{
    struct tb_regulator *tbr;
    void *p;
    int64_t interval;
    u_int64_t now;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));
    VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

    tbr = &ifq->ifcq_tbr;
    /* update token only when it is negative */
    if (tbr->tbr_token <= 0) {
        now = read_machclk();
        interval = now - tbr->tbr_last;
        if (interval >= tbr->tbr_filluptime) {
            tbr->tbr_token = tbr->tbr_depth;
        } else {
            tbr->tbr_token += interval * tbr->tbr_rate;
            if (tbr->tbr_token > tbr->tbr_depth) {
                tbr->tbr_token = tbr->tbr_depth;
            }
        }
        tbr->tbr_last = now;
    }
    /* if token is still negative, don't allow dequeue */
    if (tbr->tbr_token <= 0) {
        return NULL;
    }

    /*
     * ifclassq takes precedence over ALTQ queue;
     * ifcq_drain count is adjusted by the caller.
     */
    if (drvmgt) {
        IFCQ_DEQUEUE_SC(ifq, sc, p, ptype);
    } else {
        IFCQ_DEQUEUE(ifq, p, ptype);
    }

    if (p != NULL) {
        switch (*ptype) {
        case QP_MBUF:
            tbr->tbr_token -= TBR_SCALE(m_pktlen((mbuf_t)p));
            break;
        default:
            VERIFY(0);
            /* NOTREACHED */
        }
    }

    return p;
}
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
int
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
{
    struct tb_regulator *tbr;
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int64_t rate, old_rate;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));

    VERIFY(machclk_freq != 0);

    tbr = &ifq->ifcq_tbr;
    old_rate = tbr->tbr_rate_raw;

    rate = profile->rate;
    if (profile->percent > 0) {
        u_int64_t eff_rate;

        if (profile->percent > 100) {
            return EINVAL;
        }
        if ((eff_rate = ifp->if_output_bw.eff_bw) == 0) {
            return ENODEV;
        }
        rate = (eff_rate * profile->percent) / 100;
    }

    if (rate == 0) {
        if (!IFCQ_TBR_IS_ENABLED(ifq)) {
            return 0;
        }

        if (pktsched_verbose) {
            printf("%s: TBR disabled\n", if_name(ifp));
        }

        /* disable this TBR */
        ifq->ifcq_flags &= ~IFCQF_TBR;
        bzero(tbr, sizeof(*tbr));
        ifnet_set_start_cycle(ifp, NULL);
        if (update) {
            ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
        }
        return 0;
    }

    if (pktsched_verbose) {
        printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
            (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
            "enabled", rate, profile->depth);
    }

    /* set the new TBR */
    bzero(tbr, sizeof(*tbr));
    tbr->tbr_rate_raw = rate;
    tbr->tbr_percent = profile->percent;
    ifq->ifcq_flags |= IFCQF_TBR;

    /*
     * Note that the TBR fill up time (hence the ifnet restart time)
     * is directly related to the specified TBR depth.  The ideal
     * depth value should be computed such that the interval time
     * between each successive wakeup is adequately spaced apart,
     * in order to reduce scheduling overheads.  A target interval
     * of 10 ms seems to provide good performance balance.  This can be
     * overridden by specifying the depth profile.  Values smaller than
     * the ideal depth will reduce delay at the expense of CPU cycles.
     */
    tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
    if (tbr->tbr_rate > 0) {
        u_int32_t mtu = ifp->if_mtu;
        int64_t ival, idepth = 0;
        int i;

        if (mtu < IF_MINMTU) {
            mtu = IF_MINMTU;
        }

        ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

        for (i = 1; ; i++) {
            idepth = TBR_SCALE(i * mtu);
            if ((idepth / tbr->tbr_rate) > ival) {
                break;
            }
        }
        VERIFY(idepth > 0);

        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_depth == 0) {
            tbr->tbr_filluptime = idepth / tbr->tbr_rate;
            /* a little fudge factor to get closer to rate */
            tbr->tbr_depth = idepth + (idepth >> 3);
        } else {
            tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        }
    } else {
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        tbr->tbr_filluptime = 0xffffffffffffffffLL;
    }
    tbr->tbr_token = tbr->tbr_depth;
    tbr->tbr_last = read_machclk();

    if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
        struct timespec ts =
            { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
        if (pktsched_verbose) {
            printf("%s: TBR calculated tokens %lld "
                "filluptime %llu ns\n", if_name(ifp),
                TBR_UNSCALE(tbr->tbr_token),
                pktsched_abs_to_nsecs(tbr->tbr_filluptime));
        }
        ifnet_set_start_cycle(ifp, &ts);
    } else {
        if (pktsched_verbose) {
            if (tbr->tbr_rate == 0) {
                printf("%s: TBR calculated tokens %lld "
                    "infinite filluptime\n", if_name(ifp),
                    TBR_UNSCALE(tbr->tbr_token));
            } else if (!(ifp->if_flags & IFF_UP)) {
                printf("%s: TBR suspended (link is down)\n",
                    if_name(ifp));
            }
        }
        ifnet_set_start_cycle(ifp, NULL);
    }
    if (update && tbr->tbr_rate_raw != old_rate) {
        ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
    }

    return 0;
}
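
/*
 * Compute the effective target queue delay for an interface: start from
 * the per-interface setting, allow the net.classq.target_qdelay sysctl
 * to override it, fall back to IFQ_TARGET_DELAY, and account for any
 * start-callback coalescing delay.
 */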
void
ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay)
{
    u_int64_t qdelay = 0;
    qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

    if (ifclassq_target_qdelay != 0) {
        qdelay = ifclassq_target_qdelay;
    }

    /*
     * If we do not know the effective bandwidth, use the default
     * target queue delay.
     */
    if (qdelay == 0) {
        qdelay = IFQ_TARGET_DELAY;
    }

    /*
     * If a delay has been added to ifnet start callback for
     * coalescing, we have to add that to the pre-set target delay
     * because the packets can be in the queue longer.
     */
    if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
        ifp->if_start_delay_timeout > 0) {
        qdelay += ifp->if_start_delay_timeout;
    }

    *(if_target_qdelay) = qdelay;
}
void
ifclassq_calc_update_interval(u_int64_t *update_interval)
{
    u_int64_t uint = 0;

    /* If the system level override is set, use it */
    if (ifclassq_update_interval != 0) {
        uint = ifclassq_update_interval;
    }

    /* Otherwise use the default value */
    if (uint == 0) {
        uint = IFQ_UPDATE_INTERVAL;
    }

    *update_interval = uint;
}
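
/*
 * Reclaim memory held by the classq caches, forwarding the purge
 * request to the FQ-CoDel cache reaper.
 */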
void
ifclassq_reap_caches(boolean_t purge)
{
    fq_codel_reap_caches(purge);
}