/*
 * Copyright (c) 2011-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/net_osdep.h>
#include <net/classq/classq.h>
#if CLASSQ_RED
#include <net/classq/classq_red.h>
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
#include <net/classq/classq_rio.h>
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
#include <net/classq/classq_blue.h>
#endif /* CLASSQ_BLUE */
#include <net/classq/classq_sfb.h>
#include <net/pktsched/pktsched.h>

#include <libkern/libkern.h>

#if PF_ALTQ
#include <net/altq/altq.h>
#endif /* PF_ALTQ */
static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *,
    boolean_t);
static struct mbuf *ifclassq_poll_common(struct ifclassq *,
    mbuf_svc_class_t, boolean_t);
static struct mbuf *ifclassq_tbr_dequeue_common(struct ifclassq *, int,
    mbuf_svc_class_t, boolean_t);
void
ifclassq_init(void)
{
	_CASSERT(MBUF_TC_BE == 0);
	_CASSERT(MBUF_SC_BE == 0);
	_CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);

#if CLASSQ_RED
	red_init();
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	rio_init();
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	blue_init();
#endif /* CLASSQ_BLUE */
	sfb_init();
}
int
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
{
#pragma unused(reuse)
	struct ifclassq *ifq = &ifp->if_snd;
	int err = 0;

	IFCQ_LOCK(ifq);
	VERIFY(IFCQ_IS_EMPTY(ifq));
	ifq->ifcq_ifp = ifp;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);

	if (ifp->if_eflags & IFEF_TXSTART) {
		u_int32_t maxlen = 0;

		if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
			maxlen = if_sndq_maxlen;
		IFCQ_SET_MAXLEN(ifq, maxlen);

		if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
		    IFCQ_TARGET_QDELAY(ifq) == 0) {
			/*
			 * Choose static queues because the interface has
			 * a maximum queue size set.
			 */
			sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
		}
		ifq->ifcq_sflags = sflags;
		err = ifclassq_pktsched_setup(ifq);
		if (err == 0)
			ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
	}

#if PF_ALTQ
	IFCQ_ALTQ(ifq)->altq_ifcq = ifq;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);

	if ((ifp->if_eflags & IFEF_TXSTART) &&
	    ifp->if_output_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED)
		ALTQ_SET_READY(IFCQ_ALTQ(ifq));
	else
		ALTQ_CLEAR_READY(IFCQ_ALTQ(ifq));
#endif /* PF_ALTQ */

	IFCQ_UNLOCK(ifq);
	return (err);
}

void
ifclassq_teardown(struct ifnet *ifp)
{
	struct ifclassq *ifq = &ifp->if_snd;

	IFCQ_LOCK(ifq);
#if PF_ALTQ
	if (ALTQ_IS_READY(IFCQ_ALTQ(ifq))) {
		if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
			altq_disable(IFCQ_ALTQ(ifq));
		if (ALTQ_IS_ATTACHED(IFCQ_ALTQ(ifq)))
			altq_detach(IFCQ_ALTQ(ifq));
		IFCQ_ALTQ(ifq)->altq_flags = 0;
	}
	IFCQ_ALTQ(ifq)->altq_ifcq = NULL;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
#endif /* PF_ALTQ */

	if (IFCQ_IS_READY(ifq)) {
		if (IFCQ_TBR_IS_ENABLED(ifq)) {
			struct tb_profile tb = { 0, 0, 0 };
			(void) ifclassq_tbr_set(ifq, &tb, FALSE);
		}
		(void) pktsched_teardown(ifq);
		ifq->ifcq_flags = 0;
	}
	ifq->ifcq_sflags = 0;

	VERIFY(IFCQ_IS_EMPTY(ifq));
	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);
	IFCQ_MAXLEN(ifq) = 0;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	IFCQ_UNLOCK(ifq);
}

errno_t
ifclassq_pktsched_setup(struct ifclassq *ifq)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifp->if_eflags & IFEF_TXSTART);

	switch (ifp->if_output_sched_model) {
	case IFNET_SCHED_MODEL_DRIVER_MANAGED:
		err = pktsched_setup(ifq, PKTSCHEDT_TCQ, ifq->ifcq_sflags);
		break;

	case IFNET_SCHED_MODEL_NORMAL:
		err = pktsched_setup(ifq, PKTSCHEDT_QFQ, ifq->ifcq_sflags);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (err);
}

void
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
{
	IFCQ_LOCK(ifq);
	if (maxqlen == 0)
		maxqlen = if_sndq_maxlen;
	IFCQ_SET_MAXLEN(ifq, maxqlen);
	IFCQ_UNLOCK(ifq);
}

u_int32_t
ifclassq_get_maxlen(struct ifclassq *ifq)
{
	return (IFCQ_MAXLEN(ifq));
}

int
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
{
	int err = 0;

	IFCQ_LOCK(ifq);
	if (sc == MBUF_SC_UNSPEC) {
		VERIFY(packets != NULL);
		*packets = IFCQ_LEN(ifq);
	} else {
		VERIFY(MBUF_VALID_SC(sc));
		VERIFY(packets != NULL && bytes != NULL);
		IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
	}
	IFCQ_UNLOCK(ifq);

	return (err);
}

errno_t
ifclassq_enqueue(struct ifclassq *ifq, struct mbuf *m)
{
	errno_t err;

	IFCQ_LOCK_SPIN(ifq);

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
		ALTQ_ENQUEUE(IFCQ_ALTQ(ifq), m, err);
	} else {
		u_int32_t qlen = IFCQ_LEN(ifq);
		IFCQ_ENQUEUE(ifq, m, err);
		if (IFCQ_LEN(ifq) > qlen)
			ifq->ifcq_drain += (IFCQ_LEN(ifq) - qlen);
	}
#else /* ! PF_ALTQ */
	IFCQ_ENQUEUE(ifq, m, err);
#endif /* PF_ALTQ */

	IFCQ_UNLOCK(ifq);

	return (err);
}

errno_t
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t limit, struct mbuf **head,
    struct mbuf **tail, u_int32_t *cnt, u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, limit, head, tail,
	    cnt, len, FALSE));
}

errno_t
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
    u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, sc, limit, head, tail,
	    cnt, len, TRUE));
}

static errno_t
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
    u_int32_t *len, boolean_t drvmgt)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int32_t i = 0, l = 0;
	struct mbuf **first, *last;
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
	boolean_t draining;
#endif /* PF_ALTQ */

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

	*head = NULL;
	first = &(*head);
	last = NULL;

	IFCQ_LOCK_SPIN(ifq);

	while (i < limit) {
#if PF_ALTQ
		u_int32_t qlen;

		qlen = IFCQ_LEN(ifq);
		draining = IFCQ_IS_DRAINING(ifq);

		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else if (draining)
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE_SC(altq, sc, *head);
			else
				*head = NULL;
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else if (draining)
				IFCQ_DEQUEUE(ifq, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE(altq, *head);
			else
				*head = NULL;
		}

		if (draining && *head != NULL) {
			VERIFY(ifq->ifcq_drain >= (qlen - IFCQ_LEN(ifq)));
			ifq->ifcq_drain -= (qlen - IFCQ_LEN(ifq));
		}
#else /* ! PF_ALTQ */
		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else
				IFCQ_DEQUEUE(ifq, *head);
		}
#endif /* !PF_ALTQ */

		if (*head == NULL)
			break;

		(*head)->m_nextpkt = NULL;
		last = *head;

		l += (*head)->m_pkthdr.len;

#if MEASURE_BW
		(*head)->m_pkthdr.pkt_bwseq =
		    atomic_add_64_ov(&(ifp->if_bw.cur_seq), m_pktlen(*head));
#endif /* MEASURE_BW */
		if (IFNET_IS_CELLULAR(ifp)) {
			(*head)->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
			(*head)->m_pkthdr.pkt_unsent_databytes =
			    (total_snd_byte_count << MSIZESHIFT) +
			    ifq->ifcq_bytes;
		}
		head = &(*head)->m_nextpkt;
		i++;
	}

	IFCQ_UNLOCK(ifq);

	if (tail != NULL)
		*tail = last;
	if (cnt != NULL)
		*cnt = i;
	if (len != NULL)
		*len = l;

	return ((*first != NULL) ? 0 : EAGAIN);
}
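
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a driver-layer start routine might consume the packet chain that
 * ifclassq_dequeue() hands back.  "example_tx_start" and "send_one" are
 * hypothetical names; the real callers live in the individual drivers and
 * in the ifnet start path.
 */
#if 0
static void
example_tx_start(struct ifnet *ifp)
{
	struct mbuf *head, *tail, *m;
	u_int32_t cnt, len;

	/* ask the send queue for up to 16 packets per call */
	while (ifclassq_dequeue(&ifp->if_snd, 16, &head, &tail,
	    &cnt, &len) == 0) {
		/* walk the m_nextpkt chain built by ifclassq_dequeue_common */
		while ((m = head) != NULL) {
			head = m->m_nextpkt;
			m->m_nextpkt = NULL;
			send_one(ifp, m);	/* hypothetical helper */
		}
	}
}
#endif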

struct mbuf *
ifclassq_poll(struct ifclassq *ifq)
{
	return (ifclassq_poll_common(ifq, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_poll_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
{
	return (ifclassq_poll_common(ifq, sc, TRUE));
}

static struct mbuf *
ifclassq_poll_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt)
{
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
#endif /* PF_ALTQ */
	struct mbuf *m = NULL;

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

#if PF_ALTQ
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL_SC(ifq, sc, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL_SC(altq, sc, m);
		else
			m = NULL;
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL(ifq, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL(altq, m);
		else
			m = NULL;
	}
#else /* ! PF_ALTQ */
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else
			IFCQ_POLL_SC(ifq, sc, m);
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else
			IFCQ_POLL(ifq, m);
	}
#endif /* !PF_ALTQ */

	return (m);
}

void
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
		ALTQ_UPDATE(IFCQ_ALTQ(ifq), ev);
#endif /* PF_ALTQ */
	IFCQ_UPDATE(ifq, ev);
}

int
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
    ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
    ifclassq_deq_sc_func dequeue_sc, ifclassq_req_func request)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(enqueue != NULL);
	VERIFY(!(dequeue != NULL && dequeue_sc != NULL));
	VERIFY(request != NULL);

	ifq->ifcq_type = type;
	ifq->ifcq_disc = discipline;
	ifq->ifcq_enqueue = enqueue;
	ifq->ifcq_dequeue = dequeue;
	ifq->ifcq_dequeue_sc = dequeue_sc;
	ifq->ifcq_request = request;

	return (0);
}
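
/*
 * Illustrative only (not part of the original file): a packet scheduler
 * would typically register its callbacks from its setup path roughly as
 * sketched below, passing either a plain dequeue callback or a
 * service-class aware dequeue_sc callback, but never both.  All
 * "example_*" names are hypothetical.
 */
#if 0
static int
example_sched_attach(struct ifclassq *ifq, void *disc)
{
	/* driver-managed model: per-service-class dequeue only */
	return (ifclassq_attach(ifq, PKTSCHEDT_TCQ, disc,
	    example_enqueue, NULL, example_dequeue_sc, example_request));
}
#endif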

int
ifclassq_detach(struct ifclassq *ifq)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);

	ifq->ifcq_type = PKTSCHEDT_NONE;
	ifq->ifcq_disc = NULL;
	ifq->ifcq_enqueue = NULL;
	ifq->ifcq_dequeue = NULL;
	ifq->ifcq_dequeue_sc = NULL;
	ifq->ifcq_request = NULL;

	return (0);
}

int
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
{
	struct if_ifclassq_stats *ifqs;
	int err;

	if (*nbytes < sizeof (*ifqs))
		return (EINVAL);

	ifqs = _MALLOC(sizeof (*ifqs), M_TEMP, M_WAITOK | M_ZERO);
	if (ifqs == NULL)
		return (ENOMEM);

	IFCQ_LOCK(ifq);
	if (!IFCQ_IS_READY(ifq)) {
		IFCQ_UNLOCK(ifq);
		_FREE(ifqs, M_TEMP);
		return (ENXIO);
	}

	ifqs->ifqs_len = IFCQ_LEN(ifq);
	ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
	*(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
	*(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
	ifqs->ifqs_scheduler = ifq->ifcq_type;

	err = pktsched_getqstats(ifq, qid, ifqs);
	IFCQ_UNLOCK(ifq);

	if (err == 0 && (err = copyout((caddr_t)ifqs,
	    (user_addr_t)(uintptr_t)ubuf, sizeof (*ifqs))) == 0)
		*nbytes = sizeof (*ifqs);

	_FREE(ifqs, M_TEMP);

	return (err);
}

const char *
ifclassq_ev2str(cqev_t ev)
{
	const char *c;

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH:
		c = "LINK_BANDWIDTH";
		break;

	case CLASSQ_EV_LINK_LATENCY:
		c = "LINK_LATENCY";
		break;

	case CLASSQ_EV_LINK_MTU:
		c = "LINK_MTU";
		break;

	case CLASSQ_EV_LINK_UP:
		c = "LINK_UP";
		break;

	case CLASSQ_EV_LINK_DOWN:
		c = "LINK_DOWN";
		break;

	default:
		c = "UNKNOWN";
		break;
	}

	return (c);
}

/*
 * Internal representation of token bucket parameters:
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
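
/*
 * Worked example (illustrative, not from the original source): with
 * TBR_SHIFT == 32, a profile rate of 100 Mbit/s and a machclk_freq of
 * 1 GHz gives
 *
 *	tbr_rate = TBR_SCALE(100000000 / 8) / machclk_freq
 *	         = (12500000 << 32) / 1000000000
 *	        ~= 53687091
 *
 * i.e. roughly 0.0125 bytes of credit per machine-clock tick in the
 * fixed-point representation; TBR_UNSCALE() converts such scaled values
 * back to plain byte counts.
 */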

struct mbuf *
ifclassq_tbr_dequeue(struct ifclassq *ifq, int op)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, int op, mbuf_svc_class_t sc)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, sc, TRUE));
}

static struct mbuf *
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
    mbuf_svc_class_t sc, boolean_t drvmgt)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	u_int64_t now;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));
	VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

	tbr = &ifq->ifcq_tbr;
	if (op == CLASSQDQ_REMOVE && tbr->tbr_lastop == CLASSQDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime) {
				tbr->tbr_token = tbr->tbr_depth;
			} else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	/*
	 * ifclassq takes precedence over ALTQ queue;
	 * ifcq_drain count is adjusted by the caller.
	 */
#if PF_ALTQ
	if (IFCQ_IS_DRAINING(ifq)) {
#endif /* PF_ALTQ */
		if (op == CLASSQDQ_POLL) {
			if (drvmgt)
				IFCQ_POLL_SC(ifq, sc, m);
			else
				IFCQ_POLL(ifq, m);
		} else {
			if (drvmgt)
				IFCQ_DEQUEUE_SC(ifq, sc, m);
			else
				IFCQ_DEQUEUE(ifq, m);
		}
#if PF_ALTQ
	} else {
		struct ifaltq *altq = IFCQ_ALTQ(ifq);
		if (ALTQ_IS_ENABLED(altq)) {
			if (drvmgt)
				m = (*altq->altq_dequeue_sc)(altq, sc, op);
			else
				m = (*altq->altq_dequeue)(altq, op);
		} else {
			m = NULL;
		}
	}
#endif /* PF_ALTQ */

	if (m != NULL && op == CLASSQDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;

	return (m);
}
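
/*
 * Illustrative only (not part of the original file): the CLASSQDQ_POLL /
 * CLASSQDQ_REMOVE pairing above lets a caller inspect the next packet
 * without charging the token bucket and then remove it without a second
 * bucket check, roughly as sketched below.  "example_peek_then_send" and
 * "can_transmit" are hypothetical names.
 */
#if 0
static struct mbuf *
example_peek_then_send(struct ifclassq *ifq)
{
	struct mbuf *m;

	/* peek: does not consume tokens; records tbr_lastop = CLASSQDQ_POLL */
	m = ifclassq_tbr_dequeue(ifq, CLASSQDQ_POLL);
	if (m == NULL || !can_transmit(m))
		return (NULL);

	/* remove after poll: bypasses the token check, then charges the bucket */
	return (ifclassq_tbr_dequeue(ifq, CLASSQDQ_REMOVE));
}
#endif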

/*
 * Set a token bucket regulator.
 * If the specified rate is zero, the token bucket regulator is deleted.
 */
int
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
{
	struct tb_regulator *tbr;
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int64_t rate, old_rate;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

	VERIFY(machclk_freq != 0);

	tbr = &ifq->ifcq_tbr;
	old_rate = tbr->tbr_rate_raw;

	rate = profile->rate;
	if (profile->percent > 0) {
		u_int64_t eff_rate;

		if (profile->percent > 100)
			return (EINVAL);
		if ((eff_rate = ifp->if_output_bw.eff_bw) == 0)
			return (ENODEV);
		rate = (eff_rate * profile->percent) / 100;
	}

	if (rate == 0) {
		if (!IFCQ_TBR_IS_ENABLED(ifq))
			return (ENOENT);

		if (pktsched_verbose)
			printf("%s: TBR disabled\n", if_name(ifp));

		/* disable this TBR */
		ifq->ifcq_flags &= ~IFCQF_TBR;
		bzero(tbr, sizeof (*tbr));
		ifnet_set_start_cycle(ifp, NULL);
		if (update)
			ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
		return (0);
	}

	if (pktsched_verbose) {
		printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
		    (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
		    "enabled", rate, profile->depth);
	}

	/* set the new TBR */
	bzero(tbr, sizeof (*tbr));
	tbr->tbr_rate_raw = rate;
	tbr->tbr_percent = profile->percent;
	ifq->ifcq_flags |= IFCQF_TBR;

	/*
	 * Note that the TBR fill up time (hence the ifnet restart time)
	 * is directly related to the specified TBR depth.  The ideal
	 * depth value should be computed such that the interval time
	 * between each successive wakeup is adequately spaced apart,
	 * in order to reduce scheduling overheads.  A target interval
	 * of 10 ms seems to provide good performance balance.  This can be
	 * overridden by specifying the depth profile.  Values smaller than
	 * the ideal depth will reduce delay at the expense of CPU cycles.
	 */
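	/*
	 * Worked example (illustrative, not from the original source):
	 * at 100 Mbit/s (12.5 MB/s) roughly 125000 bytes can be sent in
	 * the 10 ms target interval, so with a 1500-byte MTU the search
	 * below settles on about 84 MTUs worth of depth (126000 bytes),
	 * the smallest multiple of the MTU whose fill-up time exceeds
	 * 10 ms.  A non-zero profile->depth overrides this estimate.
	 */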
	tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
	if (tbr->tbr_rate > 0) {
		u_int32_t mtu = ifp->if_mtu;
		int64_t ival, idepth = 0;
		int i;

		ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

		for (i = 1; ; i++) {
			idepth = TBR_SCALE(i * mtu);
			if ((idepth / tbr->tbr_rate) > ival)
				break;
		}

		tbr->tbr_depth = TBR_SCALE(profile->depth);
		if (tbr->tbr_depth == 0) {
			tbr->tbr_filluptime = idepth / tbr->tbr_rate;
			/* a little fudge factor to get closer to rate */
			tbr->tbr_depth = idepth + (idepth >> 3);
		} else {
			tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
		}
	} else {
		tbr->tbr_depth = TBR_SCALE(profile->depth);
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	}
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = CLASSQDQ_REMOVE;

	if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
		struct timespec ts =
		    { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
		if (pktsched_verbose) {
			printf("%s: TBR calculated tokens %lld "
			    "filluptime %llu ns\n", if_name(ifp),
			    TBR_UNSCALE(tbr->tbr_token),
			    pktsched_abs_to_nsecs(tbr->tbr_filluptime));
		}
		ifnet_set_start_cycle(ifp, &ts);
	} else {
		if (pktsched_verbose) {
			if (tbr->tbr_rate == 0) {
				printf("%s: TBR calculated tokens %lld "
				    "infinite filluptime\n", if_name(ifp),
				    TBR_UNSCALE(tbr->tbr_token));
			} else if (!(ifp->if_flags & IFF_UP)) {
				printf("%s: TBR suspended (link is down)\n",
				    if_name(ifp));
			}
		}
		ifnet_set_start_cycle(ifp, NULL);
	}
	if (update && tbr->tbr_rate_raw != old_rate)
		ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);

	return (0);
}