/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/types.h>
#include <sys/param.h>
#include <kern/zalloc.h>
#include <net/if_var.h>

#include <net/classq/classq.h>
#include <net/classq/classq_fq_codel.h>
#include <net/pktsched/pktsched_fq_codel.h>
static size_t fq_if_size;
static struct zone *fq_if_zone;
static fq_if_t *fq_if_alloc(struct ifnet *, classq_pkt_type_t);
static void fq_if_destroy(fq_if_t *fqs);
static void fq_if_classq_init(fq_if_t *fqs, u_int32_t priority,
    u_int32_t quantum, u_int32_t drr_max, u_int32_t svc_class);
static int fq_if_enqueue_classq(struct ifclassq *, classq_pkt_t *, boolean_t *);
static void fq_if_dequeue_classq(struct ifclassq *, classq_pkt_t *);
static int fq_if_dequeue_classq_multi(struct ifclassq *, u_int32_t,
    u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *);
static void fq_if_dequeue_sc_classq(struct ifclassq *, mbuf_svc_class_t,
    classq_pkt_t *);
static int fq_if_dequeue_sc_classq_multi(struct ifclassq *,
    mbuf_svc_class_t, u_int32_t, u_int32_t, classq_pkt_t *,
    classq_pkt_t *, u_int32_t *, u_int32_t *);
static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, u_int32_t,
    u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
    u_int32_t *, boolean_t drvmgmt);
static int fq_if_request_classq(struct ifclassq *ifq, cqrq_t op, void *arg);
void fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat);
static void fq_if_purge(fq_if_t *);
static void fq_if_purge_classq(fq_if_t *, fq_if_classq_t *);
static void fq_if_purge_flow(fq_if_t *, fq_t *, u_int32_t *, u_int32_t *);
static void fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t *fq_cl,
    bool add_to_old);
static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl,
    fq_t *fq, bool remove_hash);
static void fq_if_destroy_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq);
#define FQ_IF_ZONE_MAX  32              /* Maximum elements in zone */
#define FQ_IF_ZONE_NAME "pktsched_fq_if" /* zone for fq_if class */

#define FQ_IF_FLOW_HASH_ID(_flowid_) \
	(((_flowid_) >> FQ_IF_HASH_TAG_SHIFT) & FQ_IF_HASH_TAG_MASK)

#define FQ_IF_CLASSQ_IDLE(_fcl_) \
	(STAILQ_EMPTY(&(_fcl_)->fcl_new_flows) && \
	STAILQ_EMPTY(&(_fcl_)->fcl_old_flows))
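/*
 * A flow queue is bucketed by the tag bits of its flow hash
 * (FQ_IF_FLOW_HASH_ID); collisions within a bucket are resolved in
 * fq_if_hash_pkt() by comparing the full hash and the service class
 * index. A class counts as idle only when both its new-flows and
 * old-flows lists are empty.
 */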
typedef void (* fq_if_append_pkt_t)(classq_pkt_t *, classq_pkt_t *);
typedef boolean_t (* fq_getq_flow_t)(fq_if_t *, fq_if_classq_t *, fq_t *,
    u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
    u_int32_t *, boolean_t *, u_int32_t);
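/*
 * The scheduler core is packet-type agnostic: chain append and per-flow
 * dequeue are reached through these function pointers, selected on
 * fqs_ptype at run time (only QP_MBUF is wired up in this file).
 */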
static void
fq_if_append_mbuf(classq_pkt_t *pkt, classq_pkt_t *next_pkt)
{
	pkt->cp_mbuf->m_nextpkt = next_pkt->cp_mbuf;
}
static boolean_t
fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    u_int32_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *top,
    classq_pkt_t *last, u_int32_t *byte_cnt, u_int32_t *pkt_cnt,
    boolean_t *qempty, u_int32_t pflags)
{
	u_int32_t plen;
	pktsched_pkt_t pkt;
	boolean_t limit_reached = FALSE;
	struct ifclassq *ifq = fqs->fqs_ifq;
	struct ifnet *ifp = ifq->ifcq_ifp;

	while (fq->fq_deficit > 0 && limit_reached == FALSE &&
	    !MBUFQ_EMPTY(&fq->fq_mbufq)) {
		_PKTSCHED_PKT_INIT(&pkt);
		fq_getq_flow(fqs, fq, &pkt);
		ASSERT(pkt.pktsched_ptype == QP_MBUF);

		plen = pktsched_get_pkt_len(&pkt);
		fq->fq_deficit -= plen;
		pkt.pktsched_pkt_mbuf->m_pkthdr.pkt_flags |= pflags;

		if (top->cp_mbuf == NULL) {
			*top = pkt.pktsched_pkt;
		} else {
			ASSERT(last->cp_mbuf != NULL);
			ASSERT(last->cp_mbuf->m_nextpkt == NULL);
			last->cp_mbuf->m_nextpkt = pkt.pktsched_pkt_mbuf;
		}
		*last = pkt.pktsched_pkt;
		last->cp_mbuf->m_nextpkt = NULL;
		fq_cl->fcl_stat.fcl_dequeue++;
		fq_cl->fcl_stat.fcl_dequeue_bytes += plen;
		(*pkt_cnt)++;
		*byte_cnt += plen;

		ifclassq_set_packet_metadata(ifq, ifp, &pkt.pktsched_pkt);

		/* Check if the limit is reached */
		if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) {
			limit_reached = TRUE;
			break;
		}
	}

	*qempty = MBUFQ_EMPTY(&fq->fq_mbufq);
	return limit_reached;
}
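/*
 * Deficit round robin accounting: every packet pulled from a flow is
 * charged against that flow's deficit, and the caller's packet/byte
 * limits bound how much one visit may take, so a single flow cannot
 * monopolize a dequeue pass.
 */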
void
fq_codel_scheduler_init(void)
{
	/* Initialize the zone for flow queue structures */
	fq_codel_init();

	fq_if_size = sizeof(fq_if_t);
	fq_if_zone = zinit(fq_if_size, (FQ_IF_ZONE_MAX * fq_if_size), 0,
	    FQ_IF_ZONE_NAME);
	if (fq_if_zone == NULL) {
		panic("%s: failed allocating from %s", __func__,
		    (FQ_IF_ZONE_NAME));
	}
	zone_change(fq_if_zone, Z_EXPAND, TRUE);
	zone_change(fq_if_zone, Z_CALLERACCT, TRUE);
}
static fq_if_t *
fq_if_alloc(struct ifnet *ifp, classq_pkt_type_t ptype)
{
	fq_if_t *fqs;

	fqs = zalloc(fq_if_zone);
	if (fqs == NULL) {
		return NULL;
	}

	bzero(fqs, fq_if_size);
	fqs->fqs_ifq = &ifp->if_snd;
	fqs->fqs_ptype = ptype;

	/* Calculate target queue delay */
	ifclassq_calc_target_qdelay(ifp, &fqs->fqs_target_qdelay);

	/* Calculate update interval */
	ifclassq_calc_update_interval(&fqs->fqs_update_interval);

	/* Configure packet drop limit across all queues */
	fqs->fqs_pkt_droplimit = IFCQ_PKT_DROP_LIMIT(&ifp->if_snd);
	STAILQ_INIT(&fqs->fqs_fclist);
	return fqs;
}
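/*
 * The two values computed above are the CoDel control knobs: the target
 * queue delay is the acceptable standing-queue latency for the link, and
 * the update interval is how often each flow's minimum-delay estimate is
 * refreshed.
 */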
static void
fq_if_destroy(fq_if_t *fqs)
{
	fq_if_purge(fqs);
	fqs->fqs_ifq = NULL;
	zfree(fq_if_zone, fqs);
}
static inline u_int32_t
fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc)
{
	u_int32_t pri;

	if (fqs->fqs_flags & FQS_DRIVER_MANAGED) {
		switch (svc) {
		case MBUF_SC_BK_SYS:
		case MBUF_SC_BK:
			pri = FQ_IF_BK_INDEX;
			break;
		case MBUF_SC_BE:
		case MBUF_SC_RD:
		case MBUF_SC_OAM:
			pri = FQ_IF_BE_INDEX;
			break;
		case MBUF_SC_AV:
		case MBUF_SC_RV:
		case MBUF_SC_VI:
		case MBUF_SC_SIG:
			pri = FQ_IF_VI_INDEX;
			break;
		case MBUF_SC_VO:
		case MBUF_SC_CTL:
			pri = FQ_IF_VO_INDEX;
			break;
		default:
			pri = FQ_IF_BE_INDEX; /* Use best effort by default */
			break;
		}
		return pri;
	}

	/* scheduler is not managed by the driver */
	switch (svc) {
	case MBUF_SC_BK_SYS:
		pri = FQ_IF_BK_SYS_INDEX;
		break;
	case MBUF_SC_BK:
		pri = FQ_IF_BK_INDEX;
		break;
	case MBUF_SC_BE:
		pri = FQ_IF_BE_INDEX;
		break;
	case MBUF_SC_RD:
		pri = FQ_IF_RD_INDEX;
		break;
	case MBUF_SC_OAM:
		pri = FQ_IF_OAM_INDEX;
		break;
	case MBUF_SC_AV:
		pri = FQ_IF_AV_INDEX;
		break;
	case MBUF_SC_RV:
		pri = FQ_IF_RV_INDEX;
		break;
	case MBUF_SC_VI:
		pri = FQ_IF_VI_INDEX;
		break;
	case MBUF_SC_SIG:
		pri = FQ_IF_SIG_INDEX;
		break;
	case MBUF_SC_VO:
		pri = FQ_IF_VO_INDEX;
		break;
	case MBUF_SC_CTL:
		pri = FQ_IF_CTL_INDEX;
		break;
	default:
		pri = FQ_IF_BE_INDEX; /* Use best effort by default */
		break;
	}
	return pri;
}
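/*
 * In driver-managed mode the ten mbuf service classes collapse onto four
 * WMM-style access categories (BK/BE/VI/VO); otherwise every service
 * class keeps its own level. Lower index means higher priority, which is
 * what lets pktsched_ffs() pick the highest-priority backlogged class in
 * the dequeue path.
 */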
static void
fq_if_classq_init(fq_if_t *fqs, u_int32_t pri, u_int32_t quantum,
    u_int32_t drr_max, u_int32_t svc_class)
{
	fq_if_classq_t *fq_cl;
	VERIFY(pri < FQ_IF_MAX_CLASSES);
	fq_cl = &fqs->fqs_classq[pri];

	VERIFY(fq_cl->fcl_quantum == 0);
	fq_cl->fcl_quantum = quantum;
	fq_cl->fcl_pri = pri;
	fq_cl->fcl_drr_max = drr_max;
	fq_cl->fcl_service_class = svc_class;
	STAILQ_INIT(&fq_cl->fcl_new_flows);
	STAILQ_INIT(&fq_cl->fcl_old_flows);
}
static int
fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop)
{
	u_int32_t pri;
	fq_if_t *fqs;
	fq_if_classq_t *fq_cl;
	int ret;
	mbuf_svc_class_t svc;
	pktsched_pkt_t pkt;
	u_int32_t len;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	if ((p->cp_ptype == QP_MBUF) && !(p->cp_mbuf->m_flags & M_PKTHDR)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(p->cp_mbuf);
		*p = CLASSQ_PKT_INITIALIZER(*p);
		*pdrop = TRUE;
		return ENOBUFS;
	}
	pktsched_pkt_encap(&pkt, p);

	fqs = (fq_if_t *)ifq->ifcq_disc;
	svc = pktsched_get_pkt_svc(&pkt);
	pri = fq_if_service_to_priority(fqs, svc);
	VERIFY(pri < FQ_IF_MAX_CLASSES);
	fq_cl = &fqs->fqs_classq[pri];

	if (svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1) {
		/* BK_SYS is currently throttled */
		fq_cl->fcl_stat.fcl_throttle_drops++;
		IFCQ_CONVERT_LOCK(ifq);
		pktsched_free_pkt(&pkt);
		*pdrop = TRUE;
		return EQSUSPENDED;
	}

	len = pktsched_get_pkt_len(&pkt);
	ret = fq_addq(fqs, &pkt, fq_cl);
	if (!(fqs->fqs_flags & FQS_DRIVER_MANAGED) &&
	    !FQ_IF_CLASSQ_IDLE(fq_cl)) {
		if (((fqs->fqs_bitmaps[FQ_IF_ER] | fqs->fqs_bitmaps[FQ_IF_EB]) &
		    (1 << pri)) == 0) {
			/*
			 * this group is not in ER or EB groups,
			 * mark it as IB
			 */
			pktsched_bit_set(pri, &fqs->fqs_bitmaps[FQ_IF_IB]);
		}
	}

	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
			*pdrop = FALSE;
		} else {
			*pdrop = TRUE;
			VERIFY(ret == CLASSQEQ_DROP ||
			    ret == CLASSQEQ_DROP_FC ||
			    ret == CLASSQEQ_DROP_SP);
			pktsched_free_pkt(&pkt);
			switch (ret) {
			case CLASSQEQ_DROP:
				return ENOBUFS;
			case CLASSQEQ_DROP_FC:
				return EQFULL;
			case CLASSQEQ_DROP_SP:
				return EQSUSPENDED;
			}
		}
	} else {
		*pdrop = FALSE;
	}
	IFCQ_INC_LEN(ifq);
	IFCQ_INC_BYTES(ifq, len);
	return ret;
}
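/*
 * The three per-priority bitmaps drive the class-level round robin:
 * FQ_IF_ER holds classes currently eligible for service, FQ_IF_EB holds
 * eligible classes waiting on the current rotation, and FQ_IF_IB holds
 * backlogged classes whose budget is exhausted. A class that becomes
 * backlogged while in neither ER nor EB is parked in IB until the
 * dequeue path rotates IB into EB.
 */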
static void
fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_t *pkt)
{
	(void) fq_if_dequeue_classq_multi(ifq, 1,
	    CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, pkt, NULL, NULL, NULL);
}
static void
fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc,
    classq_pkt_t *pkt)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
	fq_if_classq_t *fq_cl;
	u_int32_t pri;

	pri = fq_if_service_to_priority(fqs, svc);
	fq_cl = &fqs->fqs_classq[pri];

	fq_if_dequeue(fqs, fq_cl, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT,
	    pkt, NULL, NULL, NULL, TRUE);
}
static int
fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt,
    u_int32_t maxbytecnt, classq_pkt_t *first_packet,
    classq_pkt_t *last_packet, u_int32_t *retpktcnt,
    u_int32_t *retbytecnt)
{
	u_int32_t pktcnt = 0, bytecnt = 0, total_pktcnt = 0, total_bytecnt = 0;
	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	classq_pkt_t tmp = CLASSQ_PKT_INITIALIZER(tmp);
	fq_if_append_pkt_t append_pkt;
	fq_if_classq_t *fq_cl;
	fq_if_t *fqs;
	u_int32_t pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	fqs = (fq_if_t *)ifq->ifcq_disc;

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		append_pkt = fq_if_append_mbuf;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	for (;;) {
		classq_pkt_t top = CLASSQ_PKT_INITIALIZER(top);
		classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);

		if (fqs->fqs_bitmaps[FQ_IF_ER] == 0 &&
		    fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
			fqs->fqs_bitmaps[FQ_IF_EB] = fqs->fqs_bitmaps[FQ_IF_IB];
			fqs->fqs_bitmaps[FQ_IF_IB] = 0;
			if (fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
				break;
			}
		}
		pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_ER]);
		if (pri == 0) {
			/*
			 * There are no ER flows, move the highest
			 * priority one from EB if there are any in that
			 * category
			 */
			pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_EB]);
			VERIFY(pri > 0);
			pktsched_bit_clr((pri - 1),
			    &fqs->fqs_bitmaps[FQ_IF_EB]);
			pktsched_bit_set((pri - 1),
			    &fqs->fqs_bitmaps[FQ_IF_ER]);
		}
		pri--; /* index starts at 0 */
		fq_cl = &fqs->fqs_classq[pri];

		if (fq_cl->fcl_budget <= 0) {
			/* Update the budget */
			fq_cl->fcl_budget += (min(fq_cl->fcl_drr_max,
			    fq_cl->fcl_stat.fcl_flows_cnt) *
			    fq_cl->fcl_quantum);
			if (fq_cl->fcl_budget <= 0) {
				goto state_change;
			}
		}
		fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
		    (maxbytecnt - total_bytecnt), &top, &tail, &pktcnt,
		    &bytecnt, FALSE);
		if (top.cp_mbuf != NULL) {
			ASSERT(pktcnt > 0 && bytecnt > 0);
			if (first.cp_mbuf == NULL) {
				first = top;
				total_pktcnt = pktcnt;
				total_bytecnt = bytecnt;
			} else {
				ASSERT(last.cp_mbuf != NULL);
				append_pkt(&last, &top);
				total_pktcnt += pktcnt;
				total_bytecnt += bytecnt;
			}
			last = tail;
			append_pkt(&last, &tmp);
			fq_cl->fcl_budget -= bytecnt;
			pktcnt = 0;
			bytecnt = 0;
		}

		/*
		 * If the class has exceeded the budget but still has data
		 * to send, move it to IB
		 */
state_change:
		if (!FQ_IF_CLASSQ_IDLE(fq_cl)) {
			if (fq_cl->fcl_budget <= 0) {
				pktsched_bit_set(pri,
				    &fqs->fqs_bitmaps[FQ_IF_IB]);
				pktsched_bit_clr(pri,
				    &fqs->fqs_bitmaps[FQ_IF_ER]);
			}
		} else {
			pktsched_bit_clr(pri, &fqs->fqs_bitmaps[FQ_IF_ER]);
			VERIFY(((fqs->fqs_bitmaps[FQ_IF_ER] |
			    fqs->fqs_bitmaps[FQ_IF_EB] |
			    fqs->fqs_bitmaps[FQ_IF_IB]) & (1 << pri)) == 0);
			fq_cl->fcl_budget = 0;
		}
		if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt) {
			break;
		}
	}

	if (__probable(first_packet != NULL)) {
		*first_packet = first;
	}
	if (last_packet != NULL) {
		*last_packet = last;
	}
	if (retpktcnt != NULL) {
		*retpktcnt = total_pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = total_bytecnt;
	}

	IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
	return 0;
}
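/*
 * Budget refill example (illustrative numbers): a class configured with
 * quantum 1500 and drr_max 4 that has three active flows gains
 * min(4, 3) * 1500 = 4500 bytes of budget when it comes up for service,
 * i.e. up to one quantum per active flow, capped at drr_max flows.
 */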
static int
fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc,
    u_int32_t maxpktcnt, u_int32_t maxbytecnt, classq_pkt_t *first_packet,
    classq_pkt_t *last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
	u_int32_t pri;
	u_int32_t total_pktcnt = 0, total_bytecnt = 0;
	fq_if_classq_t *fq_cl;
	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	fq_if_append_pkt_t append_pkt;

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		append_pkt = fq_if_append_mbuf;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	pri = fq_if_service_to_priority(fqs, svc);
	fq_cl = &fqs->fqs_classq[pri];
	/*
	 * Now we have the queue for a particular service class. We need
	 * to dequeue as many packets as needed, first from the new flows
	 * and then from the old flows.
	 */
	while (total_pktcnt < maxpktcnt && total_bytecnt < maxbytecnt &&
	    fq_cl->fcl_stat.fcl_pkt_cnt > 0) {
		classq_pkt_t top = CLASSQ_PKT_INITIALIZER(top);
		classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);
		u_int32_t pktcnt = 0, bytecnt = 0;

		fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
		    (maxbytecnt - total_bytecnt), &top, &tail, &pktcnt,
		    &bytecnt, TRUE);
		if (top.cp_mbuf != NULL) {
			if (first.cp_mbuf == NULL) {
				first = top;
				total_pktcnt = pktcnt;
				total_bytecnt = bytecnt;
			} else {
				ASSERT(last.cp_mbuf != NULL);
				append_pkt(&last, &top);
				total_pktcnt += pktcnt;
				total_bytecnt += bytecnt;
			}
			last = tail;
		}
	}

	if (__probable(first_packet != NULL)) {
		*first_packet = first;
	}
	if (last_packet != NULL) {
		*last_packet = last;
	}
	if (retpktcnt != NULL) {
		*retpktcnt = total_pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = total_bytecnt;
	}

	return 0;
}
static void
fq_if_purge_flow(fq_if_t *fqs, fq_t *fq, u_int32_t *pktsp,
    u_int32_t *bytesp)
{
	fq_if_classq_t *fq_cl;
	u_int32_t pkts, bytes;
	pktsched_pkt_t pkt;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	pkts = bytes = 0;
	_PKTSCHED_PKT_INIT(&pkt);
	for (;;) {
		fq_getq_flow(fqs, fq, &pkt);
		if (pkt.pktsched_pkt_mbuf == NULL) {
			VERIFY(pkt.pktsched_ptype == QP_INVALID);
			break;
		}
		pkts++;
		bytes += pktsched_get_pkt_len(&pkt);
		pktsched_free_pkt(&pkt);
	}
	IFCQ_DROP_ADD(fqs->fqs_ifq, pkts, bytes);

	if (fq->fq_flags & FQF_NEW_FLOW) {
		fq_if_empty_new_flow(fq, fq_cl, false);
	} else if (fq->fq_flags & FQF_OLD_FLOW) {
		fq_if_empty_old_flow(fqs, fq_cl, fq, false);
	}

	fq_if_destroy_flow(fqs, fq_cl, fq);

	if (FQ_IF_CLASSQ_IDLE(fq_cl)) {
		int i;
		for (i = FQ_IF_ER; i < FQ_IF_MAX_STATE; i++) {
			pktsched_bit_clr(fq_cl->fcl_pri,
			    &fqs->fqs_bitmaps[i]);
		}
	}
	if (pktsp != NULL) {
		*pktsp = pkts;
	}
	if (bytesp != NULL) {
		*bytesp = bytes;
	}
}
static void
fq_if_purge_classq(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	fq_t *fq, *tfq;
	/*
	 * Take each flow from new/old flow list and flush mbufs
	 * in that flow
	 */
	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) {
		fq_if_purge_flow(fqs, fq, NULL, NULL);
	}
	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) {
		fq_if_purge_flow(fqs, fq, NULL, NULL);
	}
	VERIFY(STAILQ_EMPTY(&fq_cl->fcl_new_flows));
	VERIFY(STAILQ_EMPTY(&fq_cl->fcl_old_flows));

	STAILQ_INIT(&fq_cl->fcl_new_flows);
	STAILQ_INIT(&fq_cl->fcl_old_flows);
	fq_cl->fcl_budget = 0;
}
static void
fq_if_purge(fq_if_t *fqs)
{
	int i;

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	for (i = 0; i < FQ_IF_MAX_CLASSES; i++) {
		fq_if_purge_classq(fqs, &fqs->fqs_classq[i]);
	}

	VERIFY(STAILQ_EMPTY(&fqs->fqs_fclist));

	fqs->fqs_large_flow = NULL;
	for (i = 0; i < FQ_IF_HASH_TABLE_SIZE; i++) {
		VERIFY(SLIST_EMPTY(&fqs->fqs_flows[i]));
	}

	bzero(&fqs->fqs_bitmaps, sizeof(fqs->fqs_bitmaps));

	IFCQ_LEN(fqs->fqs_ifq) = 0;
	IFCQ_BYTES(fqs->fqs_ifq) = 0;
}
static void
fq_if_purge_sc(fq_if_t *fqs, cqrq_purge_sc_t *req)
{
	fq_t *fq;

	IFCQ_LOCK_ASSERT_HELD(fqs->fqs_ifq);
	req->packets = req->bytes = 0;
	VERIFY(req->flow != 0);

	/* packet type is needed only if we want to create a flow queue */
	fq = fq_if_hash_pkt(fqs, req->flow, req->sc, 0, FALSE, QP_INVALID);

	if (fq != NULL) {
		fq_if_purge_flow(fqs, fq, &req->packets, &req->bytes);
	}
}
static void
fq_if_event(fq_if_t *fqs, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(fqs->fqs_ifq);

	switch (ev) {
	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		fq_if_purge(fqs);
		break;
	default:
		break;
	}
}
static void
fq_if_classq_suspend(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	fq_if_purge_classq(fqs, fq_cl);
	fqs->fqs_throttle = 1;
	fq_cl->fcl_stat.fcl_throttle_on++;
}
static void
fq_if_classq_resume(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	VERIFY(FQ_IF_CLASSQ_IDLE(fq_cl));
	fqs->fqs_throttle = 0;
	fq_cl->fcl_stat.fcl_throttle_off++;
}
static int
fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr)
{
	struct ifclassq *ifq = fqs->fqs_ifq;
	int index;
#if !MACH_ASSERT
#pragma unused(ifq)
#endif
	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!tr->set) {
		tr->level = fqs->fqs_throttle;
		return 0;
	}

	if (tr->level == fqs->fqs_throttle) {
		return EALREADY;
	}

	/* Throttling is allowed on BK_SYS class only */
	index = fq_if_service_to_priority(fqs, MBUF_SC_BK_SYS);
	switch (tr->level) {
	case IFNET_THROTTLE_OFF:
		fq_if_classq_resume(fqs, &fqs->fqs_classq[index]);
		break;
	case IFNET_THROTTLE_OPPORTUNISTIC:
		fq_if_classq_suspend(fqs, &fqs->fqs_classq[index]);
		break;
	default:
		break;
	}
	return 0;
}
void
fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat)
{
	u_int32_t pri;
	fq_if_classq_t *fq_cl;

	if (stat == NULL) {
		return;
	}

	pri = fq_if_service_to_priority(fqs, stat->sc);
	fq_cl = &fqs->fqs_classq[pri];
	stat->packets = fq_cl->fcl_stat.fcl_pkt_cnt;
	stat->bytes = fq_cl->fcl_stat.fcl_byte_cnt;
}
static int
fq_if_request_classq(struct ifclassq *ifq, cqrq_t rq, void *arg)
{
	int err = 0;
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * These are usually slow operations, convert the lock ahead of time
	 */
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	switch (rq) {
	case CLASSQRQ_PURGE:
		fq_if_purge(fqs);
		break;
	case CLASSQRQ_PURGE_SC:
		fq_if_purge_sc(fqs, (cqrq_purge_sc_t *)arg);
		break;
	case CLASSQRQ_EVENT:
		fq_if_event(fqs, (cqev_t)arg);
		break;
	case CLASSQRQ_THROTTLE:
		fq_if_throttle(fqs, (cqrq_throttle_t *)arg);
		break;
	case CLASSQRQ_STAT_SC:
		fq_if_stat_sc(fqs, (cqrq_stat_sc_t *)arg);
		break;
	}
	return err;
}
int
fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags,
    classq_pkt_type_t ptype)
{
#pragma unused(flags)
	struct ifnet *ifp = ifq->ifcq_ifp;
	fq_if_t *fqs = NULL;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	fqs = fq_if_alloc(ifp, ptype);
	if (fqs == NULL) {
		return ENOMEM;
	}

	if (flags & PKTSCHEDF_QALG_DRIVER_MANAGED) {
		fqs->fqs_flags |= FQS_DRIVER_MANAGED;
		fq_if_classq_init(fqs, FQ_IF_BK_INDEX, 1500,
		    2, MBUF_SC_BK);
		fq_if_classq_init(fqs, FQ_IF_BE_INDEX, 1500,
		    4, MBUF_SC_BE);
		fq_if_classq_init(fqs, FQ_IF_VI_INDEX, 3000,
		    6, MBUF_SC_VI);
		fq_if_classq_init(fqs, FQ_IF_VO_INDEX, 600,
		    8, MBUF_SC_VO);
	} else {
		/* SIG shares same INDEX with VI */
		_CASSERT(SCIDX_SIG == SCIDX_VI);
		_CASSERT(FQ_IF_SIG_INDEX == FQ_IF_VI_INDEX);

		fq_if_classq_init(fqs, FQ_IF_BK_SYS_INDEX, 1500,
		    2, MBUF_SC_BK_SYS);
		fq_if_classq_init(fqs, FQ_IF_BK_INDEX, 1500,
		    2, MBUF_SC_BK);
		fq_if_classq_init(fqs, FQ_IF_BE_INDEX, 1500,
		    4, MBUF_SC_BE);
		fq_if_classq_init(fqs, FQ_IF_RD_INDEX, 1500,
		    4, MBUF_SC_RD);
		fq_if_classq_init(fqs, FQ_IF_OAM_INDEX, 1500,
		    4, MBUF_SC_OAM);
		fq_if_classq_init(fqs, FQ_IF_AV_INDEX, 3000,
		    6, MBUF_SC_AV);
		fq_if_classq_init(fqs, FQ_IF_RV_INDEX, 3000,
		    6, MBUF_SC_RV);
		fq_if_classq_init(fqs, FQ_IF_VI_INDEX, 3000,
		    6, MBUF_SC_VI);
		fq_if_classq_init(fqs, FQ_IF_VO_INDEX, 600,
		    8, MBUF_SC_VO);
		fq_if_classq_init(fqs, FQ_IF_CTL_INDEX, 600,
		    8, MBUF_SC_CTL);
	}

	err = ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs,
	    fq_if_enqueue_classq, fq_if_dequeue_classq,
	    fq_if_dequeue_sc_classq, fq_if_dequeue_classq_multi,
	    fq_if_dequeue_sc_classq_multi, fq_if_request_classq);

	if (err != 0) {
		printf("%s: error from ifclassq_attach, "
		    "failed to attach fq_if: %d\n", __func__, err);
		fq_if_destroy(fqs);
	}
	return err;
}
fq_t *
fq_if_hash_pkt(fq_if_t *fqs, u_int32_t flowid, mbuf_svc_class_t svc_class,
    u_int64_t now, boolean_t create, classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	flowq_list_t *fq_list;
	fq_if_classq_t *fq_cl;
	u_int8_t fqs_hash_id;
	u_int8_t scidx;

	scidx = fq_if_service_to_priority(fqs, svc_class);

	fqs_hash_id = FQ_IF_FLOW_HASH_ID(flowid);

	fq_list = &fqs->fqs_flows[fqs_hash_id];

	SLIST_FOREACH(fq, fq_list, fq_hashlink) {
		if (fq->fq_flowhash == flowid &&
		    fq->fq_sc_index == scidx) {
			break;
		}
	}
	if (fq == NULL && create == TRUE) {
		ASSERT(ptype == QP_MBUF);

		/* If the flow is not already on the list, allocate it */
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		fq = fq_alloc(ptype);
		if (fq != NULL) {
			fq->fq_flowhash = flowid;
			fq->fq_sc_index = scidx;
			fq->fq_updatetime = now + fqs->fqs_update_interval;
			fq_cl = &fqs->fqs_classq[scidx];
			fq->fq_flags = FQF_FLOWCTL_CAPABLE;
			SLIST_INSERT_HEAD(fq_list, fq, fq_hashlink);
			fq_cl->fcl_stat.fcl_flows_cnt++;
		}
	}

	/*
	 * If getq time is not set because this is the first packet or after
	 * idle time, set it now so that we can detect a stall.
	 */
	if (fq != NULL && fq->fq_getqtime == 0) {
		fq->fq_getqtime = now;
	}

	return fq;
}
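/*
 * Note the lookup key: both fq_flowhash and fq_sc_index must match, so
 * packets with the same flow hash but different service classes land in
 * distinct flow queues within the same bucket.
 */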
static void
fq_if_destroy_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq)
{
	u_int8_t hash_id;
	hash_id = FQ_IF_FLOW_HASH_ID(fq->fq_flowhash);
	SLIST_REMOVE(&fqs->fqs_flows[hash_id], fq, flowq,
	    fq_hashlink);
	fq_cl->fcl_stat.fcl_flows_cnt--;
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	fq_destroy(fq);
}
boolean_t
fq_if_at_drop_limit(fq_if_t *fqs)
{
	return (IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ?
	       TRUE : FALSE;
}
static void
fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    bool remove_hash)
{
	/*
	 * Remove the flow queue if it is empty
	 * and delete it
	 */
	STAILQ_REMOVE(&fq_cl->fcl_old_flows, fq, flowq,
	    fq_actlink);
	fq->fq_flags &= ~FQF_OLD_FLOW;
	fq_cl->fcl_stat.fcl_oldflows_cnt--;
	VERIFY(fq->fq_bytes == 0);

	if (remove_hash) {
		/* Remove from the hash list */
		fq_if_destroy_flow(fqs, fq_cl, fq);
	}
}
static void
fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t *fq_cl, bool add_to_old)
{
	/* Move to the end of old queue list */
	STAILQ_REMOVE(&fq_cl->fcl_new_flows, fq,
	    flowq, fq_actlink);
	fq->fq_flags &= ~FQF_NEW_FLOW;
	fq_cl->fcl_stat.fcl_newflows_cnt--;

	if (add_to_old) {
		STAILQ_INSERT_TAIL(&fq_cl->fcl_old_flows, fq,
		    fq_actlink);
		fq->fq_flags |= FQF_OLD_FLOW;
		fq_cl->fcl_stat.fcl_oldflows_cnt++;
	}
}
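/*
 * Per the FQ-CoDel discipline, a new flow that goes empty or spends its
 * deficit is demoted to the tail of the old-flows list rather than being
 * forgotten; it leaves the scheduler entirely only on the purge paths,
 * which pass add_to_old == false.
 */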
void
fq_if_drop_packet(fq_if_t *fqs)
{
	fq_t *fq = fqs->fqs_large_flow;
	fq_if_classq_t *fq_cl;
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;

	if (fq == NULL) {
		return;
	}
	/* queue can not be empty on the largest flow */
	VERIFY(!fq_empty(fq));

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	ASSERT(pkt.pktsched_ptype != QP_INVALID);

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	if (fq_empty(fq)) {
		fqs->fqs_large_flow = NULL;
		if (fq->fq_flags & FQF_OLD_FLOW) {
			fq_if_empty_old_flow(fqs, fq_cl, fq, true);
		} else {
			VERIFY(fq->fq_flags & FQF_NEW_FLOW);
			fq_if_empty_new_flow(fq, fq_cl, true);
		}
	}
	IFCQ_DROP_ADD(fqs->fqs_ifq, 1, pktsched_get_pkt_len(&pkt));

	pktsched_free_pkt(&pkt);
	fq_cl->fcl_stat.fcl_drop_overflow++;
}
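/*
 * Overflow policy: when the interface is at its drop limit, the victim
 * is the head packet of the largest tracked flow (fqs_large_flow), so
 * the buffer hog pays instead of spreading drops across all flows.
 */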
void
fq_if_is_flow_heavy(fq_if_t *fqs, fq_t *fq)
{
	fq_t *prev_fq;

	if (fqs->fqs_large_flow != NULL &&
	    fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
		fqs->fqs_large_flow = NULL;
	}

	if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
		return;
	}

	prev_fq = fqs->fqs_large_flow;
	if (prev_fq == NULL) {
		if (!fq_empty(fq)) {
			fqs->fqs_large_flow = fq;
		}
		return;
	} else if (fq->fq_bytes > prev_fq->fq_bytes) {
		fqs->fqs_large_flow = fq;
	}
}
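/*
 * Only the single largest flow above FQ_IF_LARGE_FLOW_BYTE_LIMIT is
 * remembered; the cached pointer is cleared as soon as that flow drains
 * below the threshold, after which any still-heavy flow can claim the
 * slot on a later call.
 */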
boolean_t
fq_if_add_fcentry(fq_if_t *fqs, pktsched_pkt_t *pkt, uint32_t flowid,
    uint8_t flowsrc, fq_if_classq_t *fq_cl)
{
	struct flowadv_fcentry *fce;

	STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) {
		if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
		    fce->fce_flowid == flowid) {
			/* Already on flowcontrol list */
			return TRUE;
		}
	}
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	fce = pktsched_alloc_fcentry(pkt, fqs->fqs_ifq->ifcq_ifp, M_WAITOK);
	if (fce != NULL) {
		/* XXX Add number of bytes in the queue */
		STAILQ_INSERT_TAIL(&fqs->fqs_fclist, fce, fce_link);
		fq_cl->fcl_stat.fcl_flow_control++;
	}
	return (fce != NULL) ? TRUE : FALSE;
}
void
fq_if_flow_feedback(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl)
{
	struct flowadv_fcentry *fce = NULL;

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) {
		if (fce->fce_flowid == fq->fq_flowhash) {
			break;
		}
	}
	if (fce != NULL) {
		STAILQ_REMOVE(&fqs->fqs_fclist, fce, flowadv_fcentry,
		    fce_link);
		STAILQ_NEXT(fce, fce_link) = NULL;
		flowadv_add_entry(fce);
		fq_cl->fcl_stat.fcl_flow_feedback++;
	}
	fq->fq_flags &= ~FQF_FLOWCTL_ON;
}
static void
fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit,
    u_int32_t bytelimit, classq_pkt_t *top, classq_pkt_t *tail,
    u_int32_t *retpktcnt, u_int32_t *retbytecnt, boolean_t drvmgmt)
{
	fq_t *fq = NULL, *tfq = NULL;
	flowq_stailq_t temp_stailq;
	u_int32_t pktcnt, bytecnt;
	boolean_t qempty, limit_reached = FALSE;
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	fq_getq_flow_t fq_getq_flow_fn;

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		fq_getq_flow_fn = fq_getq_flow_mbuf;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/*
	 * maximum byte limit should not be greater than the budget for
	 * this class
	 */
	if ((int32_t)bytelimit > fq_cl->fcl_budget && !drvmgmt) {
		bytelimit = fq_cl->fcl_budget;
	}

	VERIFY(pktlimit > 0 && bytelimit > 0 && top != NULL);
	pktcnt = bytecnt = 0;
	STAILQ_INIT(&temp_stailq);

	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) {
		ASSERT((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
		    FQF_NEW_FLOW);

		limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
		    pktlimit, top, &last, &bytecnt, &pktcnt, &qempty,
		    PKTF_NEW_FLOW);

		if (fq->fq_deficit <= 0 || qempty) {
			fq_if_empty_new_flow(fq, fq_cl, true);
		}
		fq->fq_deficit += fq_cl->fcl_quantum;
		if (limit_reached) {
			goto done;
		}
	}

	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) {
		VERIFY((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
		    FQF_OLD_FLOW);

		limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
		    pktlimit, top, &last, &bytecnt, &pktcnt, &qempty, 0);

		if (qempty) {
			fq_if_empty_old_flow(fqs, fq_cl, fq, true);
		} else if (fq->fq_deficit <= 0) {
			STAILQ_REMOVE(&fq_cl->fcl_old_flows, fq,
			    flowq, fq_actlink);
			/*
			 * Move to the end of the old queues list. We do not
			 * need to update the flow count since this flow
			 * will be added to the tail again
			 */
			STAILQ_INSERT_TAIL(&temp_stailq, fq, fq_actlink);
			fq->fq_deficit += fq_cl->fcl_quantum;
		}
		if (limit_reached) {
			goto done;
		}
	}

done:
	if (!STAILQ_EMPTY(&fq_cl->fcl_old_flows)) {
		STAILQ_CONCAT(&fq_cl->fcl_old_flows, &temp_stailq);
	} else if (!STAILQ_EMPTY(&temp_stailq)) {
		fq_cl->fcl_old_flows = temp_stailq;
	}

	if (last.cp_mbuf != NULL) {
		VERIFY(top->cp_mbuf != NULL);
		if (tail != NULL) {
			*tail = last;
		}
	}
	if (retpktcnt != NULL) {
		*retpktcnt = pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = bytecnt;
	}
}
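/*
 * Two-pass FQ-CoDel scan: new flows are served first and demoted to the
 * old list once visited; old flows that exhaust their deficit are
 * recycled through temp_stailq to the tail with a fresh quantum instead
 * of being removed, preserving round-robin order across calls.
 */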
int
fq_if_teardown_ifclassq(struct ifclassq *ifq)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(fqs != NULL && ifq->ifcq_type == PKTSCHEDT_FQ_CODEL);

	fq_if_destroy(fqs);
	ifq->ifcq_disc = NULL;
	return ifclassq_detach(ifq);
}
static void
fq_export_flowstats(fq_if_t *fqs, fq_t *fq,
    struct fq_codel_flowstats *flowstat)
{
	bzero(flowstat, sizeof(*flowstat));
	flowstat->fqst_min_qdelay = fq->fq_min_qdelay;
	flowstat->fqst_bytes = fq->fq_bytes;
	flowstat->fqst_flowhash = fq->fq_flowhash;
	if (fq->fq_flags & FQF_NEW_FLOW) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_NEW_FLOW;
	}
	if (fq->fq_flags & FQF_OLD_FLOW) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_OLD_FLOW;
	}
	if (fq->fq_flags & FQF_DELAY_HIGH) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_DELAY_HIGH;
	}
	if (fq->fq_flags & FQF_FLOWCTL_ON) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_FLOWCTL_ON;
	}
	if (fqs->fqs_large_flow == fq) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_LARGE_FLOW;
	}
}
int
fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid,
    struct if_ifclassq_stats *ifqs)
{
	struct fq_codel_classstats *fcls;
	fq_if_classq_t *fq_cl;
	fq_if_t *fqs;
	fq_t *fq = NULL;
	u_int32_t i, flowstat_cnt;

	if (qid >= FQ_IF_MAX_CLASSES) {
		return EINVAL;
	}

	fqs = (fq_if_t *)ifq->ifcq_disc;
	fcls = &ifqs->ifqs_fq_codel_stats;

	fq_cl = &fqs->fqs_classq[qid];

	fcls->fcls_pri = fq_cl->fcl_pri;
	fcls->fcls_service_class = fq_cl->fcl_service_class;
	fcls->fcls_quantum = fq_cl->fcl_quantum;
	fcls->fcls_drr_max = fq_cl->fcl_drr_max;
	fcls->fcls_budget = fq_cl->fcl_budget;
	fcls->fcls_target_qdelay = fqs->fqs_target_qdelay;
	fcls->fcls_update_interval = fqs->fqs_update_interval;
	fcls->fcls_flow_control = fq_cl->fcl_stat.fcl_flow_control;
	fcls->fcls_flow_feedback = fq_cl->fcl_stat.fcl_flow_feedback;
	fcls->fcls_dequeue_stall = fq_cl->fcl_stat.fcl_dequeue_stall;
	fcls->fcls_drop_overflow = fq_cl->fcl_stat.fcl_drop_overflow;
	fcls->fcls_drop_early = fq_cl->fcl_stat.fcl_drop_early;
	fcls->fcls_drop_memfailure = fq_cl->fcl_stat.fcl_drop_memfailure;
	fcls->fcls_flows_cnt = fq_cl->fcl_stat.fcl_flows_cnt;
	fcls->fcls_newflows_cnt = fq_cl->fcl_stat.fcl_newflows_cnt;
	fcls->fcls_oldflows_cnt = fq_cl->fcl_stat.fcl_oldflows_cnt;
	fcls->fcls_pkt_cnt = fq_cl->fcl_stat.fcl_pkt_cnt;
	fcls->fcls_flow_control_fail = fq_cl->fcl_stat.fcl_flow_control_fail;
	fcls->fcls_dequeue = fq_cl->fcl_stat.fcl_dequeue;
	fcls->fcls_dequeue_bytes = fq_cl->fcl_stat.fcl_dequeue_bytes;
	fcls->fcls_byte_cnt = fq_cl->fcl_stat.fcl_byte_cnt;
	fcls->fcls_throttle_on = fq_cl->fcl_stat.fcl_throttle_on;
	fcls->fcls_throttle_off = fq_cl->fcl_stat.fcl_throttle_off;
	fcls->fcls_throttle_drops = fq_cl->fcl_stat.fcl_throttle_drops;
	fcls->fcls_dup_rexmts = fq_cl->fcl_stat.fcl_dup_rexmts;

	/* Gather per flow stats */
	flowstat_cnt = min((fcls->fcls_newflows_cnt +
	    fcls->fcls_oldflows_cnt), FQ_IF_MAX_FLOWSTATS);
	i = 0;
	STAILQ_FOREACH(fq, &fq_cl->fcl_new_flows, fq_actlink) {
		if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt) {
			break;
		}

		/* leave space for a few old flows */
		if ((flowstat_cnt - i) < fcls->fcls_oldflows_cnt &&
		    i >= (FQ_IF_MAX_FLOWSTATS >> 1)) {
			break;
		}
		fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
		i++;
	}
	STAILQ_FOREACH(fq, &fq_cl->fcl_old_flows, fq_actlink) {
		if (i >= flowstat_cnt) {
			break;
		}
		fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
		i++;
	}

	VERIFY(i <= flowstat_cnt);
	fcls->fcls_flowstats_cnt = i;
	return 0;
}