/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/net/altq/altq_fairq.c,v 1.2 2008/05/14 11:59:23 sephe Exp $
 */
/*
 * Matt: I gutted altq_priq.c and used it as a skeleton on which to build
 * fairq.  The fairq algorithm is completely different from priq, of course,
 * but because I used priq's skeleton I believe I should include priq's
 * copyright.
 *
 * Copyright (C) 2000-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * FAIRQ - take traffic classified by keep state (hashed into
 *         pf->pftag_flowhash) and bucketize it.  Fairly extract
 *         the first packet from each bucket in a round-robin fashion.
 *
 * TODO - better overall qlimit support (right now it is per-bucket).
 *      - NOTE: red etc is per bucket, not overall.
 *      - better service curve support.
 *
 * EXAMPLE:
 *
 *  altq on em0 fairq bandwidth 650Kb queue { std, bulk }
 *  queue std priority 3 bandwidth 200Kb \
 *      fairq (buckets 64, default, hogs 1Kb) qlimit 50
 *  queue bulk priority 2 bandwidth 100Kb \
 *      fairq (buckets 64, hogs 1Kb) qlimit 50
 *
 * NOTE: When the aggregate bandwidth is less than the link bandwidth
 *       any remaining bandwidth is dynamically assigned using the
 *       existing bandwidth specs as weightings.
 *
 *  pass out on em0 from any to any keep state queue std
 *  pass out on em0 inet proto tcp ..... port ... keep state queue bulk
 */
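
/*
 * Worked example (illustrative numbers, not from the original sources):
 * when every class is running over its spec, the dequeue path below
 * computes scale = cl_bw_current * 100 / cl_bandwidth for each class and
 * serves the one with the smallest value.  With the config above, "std"
 * moving 240Kb against its 200Kb spec scores 120 while "bulk" moving
 * 150Kb against its 100Kb spec scores 150, so "std" is served first;
 * leftover link bandwidth is thus shared in proportion to the configured
 * rates.
 */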

#if PKTSCHED_FAIRQ

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_fairq.h>
#include <netinet/in.h>

/*
 * function prototypes
 */
#if 0
static int fairq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *fairq_dequeue_ifclassq(struct ifclassq *, cqdq_op_t);
static int fairq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
#endif
static int fairq_clear_interface(struct fairq_if *);
static inline int fairq_addq(struct fairq_class *, struct mbuf *,
    struct pf_mtag *);
static inline struct mbuf *fairq_getq(struct fairq_class *, u_int64_t);
static inline struct mbuf *fairq_pollq(struct fairq_class *, u_int64_t, int *);
static fairq_bucket_t *fairq_selectq(struct fairq_class *, int);
static void fairq_purgeq(struct fairq_if *, struct fairq_class *, u_int32_t,
    u_int32_t *, u_int32_t *);
static void fairq_updateq(struct fairq_if *, struct fairq_class *, cqev_t);
static struct fairq_class *fairq_class_create(struct fairq_if *, int, u_int32_t,
    u_int64_t, u_int32_t, int, u_int64_t, u_int64_t, u_int64_t, u_int64_t,
    u_int32_t);
static int fairq_class_destroy(struct fairq_if *, struct fairq_class *);
static int fairq_destroy_locked(struct fairq_if *);
static inline struct fairq_class *fairq_clh_to_clp(struct fairq_if *,
    u_int32_t);
static const char *fairq_style(struct fairq_if *);

#define FAIRQ_ZONE_MAX     32                   /* maximum elements in zone */
#define FAIRQ_ZONE_NAME    "pktsched_fairq"     /* zone name */

static unsigned int fairq_size;                 /* size of zone element */
static struct zone *fairq_zone;                 /* zone for fairq */

#define FAIRQ_CL_ZONE_MAX  32                   /* maximum elements in zone */
#define FAIRQ_CL_ZONE_NAME "pktsched_fairq_cl"  /* zone name */

static unsigned int fairq_cl_size;              /* size of zone element */
static struct zone *fairq_cl_zone;              /* zone for fairq_cl */

void
fairq_init(void)
{
    fairq_size = sizeof (struct fairq_if);
    fairq_zone = zinit(fairq_size, FAIRQ_ZONE_MAX * fairq_size,
        0, FAIRQ_ZONE_NAME);
    if (fairq_zone == NULL) {
        panic("%s: failed allocating %s", __func__, FAIRQ_ZONE_NAME);
        /* NOTREACHED */
    }
    zone_change(fairq_zone, Z_EXPAND, TRUE);
    zone_change(fairq_zone, Z_CALLERACCT, TRUE);

    fairq_cl_size = sizeof (struct fairq_class);
    fairq_cl_zone = zinit(fairq_cl_size, FAIRQ_CL_ZONE_MAX * fairq_cl_size,
        0, FAIRQ_CL_ZONE_NAME);
    if (fairq_cl_zone == NULL) {
        panic("%s: failed allocating %s", __func__, FAIRQ_CL_ZONE_NAME);
        /* NOTREACHED */
    }
    zone_change(fairq_cl_zone, Z_EXPAND, TRUE);
    zone_change(fairq_cl_zone, Z_CALLERACCT, TRUE);
}

struct fairq_if *
fairq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
    struct fairq_if *fif;

    fif = (how == M_WAITOK) ?
        zalloc(fairq_zone) : zalloc_noblock(fairq_zone);
    if (fif == NULL)
        return (NULL);

    bzero(fif, fairq_size);
    fif->fif_maxpri = -1;
    fif->fif_ifq = &ifp->if_snd;
    if (altq)
        fif->fif_flags |= FAIRQIFF_ALTQ;

    if (pktsched_verbose) {
        log(LOG_DEBUG, "%s: %s scheduler allocated\n",
            if_name(ifp), fairq_style(fif));
    }

    return (fif);
}

int
fairq_destroy(struct fairq_if *fif)
{
    struct ifclassq *ifq = fif->fif_ifq;
    int err;

    IFCQ_LOCK(ifq);
    err = fairq_destroy_locked(fif);
    IFCQ_UNLOCK(ifq);

    return (err);
}

static int
fairq_destroy_locked(struct fairq_if *fif)
{
    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    (void) fairq_clear_interface(fif);

    if (pktsched_verbose) {
        log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
    }

    zfree(fairq_zone, fif);

    return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
fairq_clear_interface(struct fairq_if *fif)
{
    struct fairq_class *cl;
    int pri;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    /* clear out the classes */
    for (pri = 0; pri <= fif->fif_maxpri; pri++)
        if ((cl = fif->fif_classes[pri]) != NULL)
            fairq_class_destroy(fif, cl);

    return (0);
}

/* discard all the queued packets on the interface */
void
fairq_purge(struct fairq_if *fif)
{
    struct fairq_class *cl;
    int pri;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    for (pri = 0; pri <= fif->fif_maxpri; pri++) {
        if ((cl = fif->fif_classes[pri]) != NULL && cl->cl_head)
            fairq_purgeq(fif, cl, 0, NULL, NULL);
    }
#if !PF_ALTQ
    /*
     * This assertion is safe to be made only when PF_ALTQ is not
     * configured; otherwise, IFCQ_LEN represents the sum of the
     * packets managed by ifcq_disc and altq_disc instances, which
     * is possible when transitioning between the two.
     */
    VERIFY(IFCQ_LEN(fif->fif_ifq) == 0);
#endif /* !PF_ALTQ */
}

void
fairq_event(struct fairq_if *fif, cqev_t ev)
{
    struct fairq_class *cl;
    int pri;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    for (pri = 0; pri <= fif->fif_maxpri; pri++)
        if ((cl = fif->fif_classes[pri]) != NULL)
            fairq_updateq(fif, cl, ev);
}

int
fairq_add_queue(struct fairq_if *fif, int priority, u_int32_t qlimit,
    u_int64_t bandwidth, u_int32_t nbuckets, int flags, u_int64_t hogs_m1,
    u_int64_t lssc_m1, u_int64_t lssc_d, u_int64_t lssc_m2, u_int32_t qid,
    struct fairq_class **clp)
{
    struct fairq_class *cl;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    /* check parameters */
    if (priority >= FAIRQ_MAXPRI)
        return (EINVAL);
    if (bandwidth == 0 || (bandwidth / 8) == 0)
        return (EINVAL);
    if (fif->fif_classes[priority] != NULL)
        return (EBUSY);
    if (fairq_clh_to_clp(fif, qid) != NULL)
        return (EBUSY);

    cl = fairq_class_create(fif, priority, qlimit, bandwidth,
        nbuckets, flags, hogs_m1, lssc_m1, lssc_d, lssc_m2, qid);
    if (cl == NULL)
        return (ENOMEM);

    if (clp != NULL)
        *clp = cl;

    return (0);
}

static struct fairq_class *
fairq_class_create(struct fairq_if *fif, int pri, u_int32_t qlimit,
    u_int64_t bandwidth, u_int32_t nbuckets, int flags, u_int64_t hogs_m1,
    u_int64_t lssc_m1, u_int64_t lssc_d, u_int64_t lssc_m2, u_int32_t qid)
{
#pragma unused(lssc_d, lssc_m2)
    struct ifnet *ifp;
    struct ifclassq *ifq;
    struct fairq_class *cl;
    u_int32_t i;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    /* Sanitize flags unless internally configured */
    if (fif->fif_flags & FAIRQIFF_ALTQ)
        flags &= FARF_USERFLAGS;

#if !CLASSQ_RED
    if (flags & FARF_RED) {
        log(LOG_ERR, "%s: %s RED not available!\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
        return (NULL);
    }
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
    if (flags & FARF_RIO) {
        log(LOG_ERR, "%s: %s RIO not available!\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
        return (NULL);
    }
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
    if (flags & FARF_BLUE) {
        log(LOG_ERR, "%s: %s BLUE not available!\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
        return (NULL);
    }
#endif /* !CLASSQ_BLUE */

    /* These are mutually exclusive */
    if ((flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) &&
        (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RED &&
        (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RIO &&
        (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_BLUE &&
        (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_SFB) {
        log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
        return (NULL);
    }

    if (bandwidth == 0 || (bandwidth / 8) == 0) {
        log(LOG_ERR, "%s: %s invalid data rate %llu\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif), bandwidth);
        return (NULL);
    }

    if (nbuckets == 0)
        nbuckets = 256;
    if (nbuckets > FAIRQ_MAX_BUCKETS)
        nbuckets = FAIRQ_MAX_BUCKETS;
    /* enforce power-of-2 size */
    while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1))
        ++nbuckets;
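
    /*
     * The power-of-2 test above relies on the identity that for a
     * power of two n, n ^ (n - 1) sets every bit from the top bit
     * down, i.e. equals 2n - 1; any other n shares high bits with
     * n - 1 and the XOR comes out smaller.  The loop therefore bumps
     * nbuckets up to the next power of two (e.g. 100 -> 128), which
     * is what lets fairq_addq() pick a bucket with the cheap mask
     * (hash & cl_nbucket_mask) instead of a modulo.
     */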

    ifq = fif->fif_ifq;
    ifp = FAIRQIF_IFP(fif);

    if ((cl = fif->fif_classes[pri]) != NULL) {
        /* modify the class instead of creating a new one */
        if (cl->cl_head)
            fairq_purgeq(fif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
        if (cl->cl_qtype == Q_RIO)
            rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (cl->cl_qtype == Q_RED)
            red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (cl->cl_qtype == Q_BLUE)
            blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
        if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
            sfb_destroy(cl->cl_sfb);
        cl->cl_qalg.ptr = NULL;
        cl->cl_qtype = Q_DROPTAIL;
        cl->cl_qstate = QS_RUNNING;
    } else {
        cl = zalloc(fairq_cl_zone);
        if (cl == NULL)
            goto err_ret;
        bzero(cl, fairq_cl_size);
        cl->cl_nbuckets = nbuckets;
        cl->cl_nbucket_mask = nbuckets - 1;

        cl->cl_buckets = _MALLOC(sizeof (struct fairq_bucket) *
            cl->cl_nbuckets, M_DEVBUF, M_WAITOK|M_ZERO);
        if (cl->cl_buckets == NULL)
            goto err_buckets;
        cl->cl_head = NULL;
    }

    fif->fif_classes[pri] = cl;
    if (flags & FARF_DEFAULTCLASS)
        fif->fif_default = cl;
    if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
        qlimit = IFCQ_MAXLEN(ifq);
        if (qlimit == 0)
            qlimit = DEFAULT_QLIMIT;    /* use default */
    }
    cl->cl_qlimit = qlimit;
    for (i = 0; i < cl->cl_nbuckets; ++i) {
        _qinit(&cl->cl_buckets[i].queue, Q_DROPTAIL, qlimit);
    }
    cl->cl_bandwidth = bandwidth / 8;   /* cvt to bytes per second */
    cl->cl_qtype = Q_DROPTAIL;
    cl->cl_qstate = QS_RUNNING;
    cl->cl_flags = flags;
    cl->cl_pri = pri;
    if (pri > fif->fif_maxpri)
        fif->fif_maxpri = pri;
    cl->cl_fif = fif;
    cl->cl_handle = qid;
    cl->cl_hogs_m1 = hogs_m1 / 8;
    cl->cl_lssc_m1 = lssc_m1 / 8;       /* NOT YET USED */
    cl->cl_bw_current = 0;

    if (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
        u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
        int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

        cl->cl_qflags = 0;
        if (flags & FARF_ECN) {
            if (flags & FARF_BLUE)
                cl->cl_qflags |= BLUEF_ECN;
            else if (flags & FARF_SFB)
                cl->cl_qflags |= SFBF_ECN;
            else if (flags & FARF_RED)
                cl->cl_qflags |= REDF_ECN;
            else if (flags & FARF_RIO)
                cl->cl_qflags |= RIOF_ECN;
        }
        if (flags & FARF_FLOWCTL) {
            if (flags & FARF_SFB)
                cl->cl_qflags |= SFBF_FLOWCTL;
        }
        if (flags & FARF_CLEARDSCP) {
            if (flags & FARF_RIO)
                cl->cl_qflags |= RIOF_CLEARDSCP;
        }
#if CLASSQ_RED || CLASSQ_RIO
        /*
         * XXX: RED & RIO should be watching link speed and MTU
         * events and recompute pkttime accordingly.
         */
        if (ifbandwidth < 8)
            pkttime = 1000 * 1000 * 1000; /* 1 sec */
        else
            pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
                (ifbandwidth / 8);

        /* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RIO
        if (flags & FARF_RIO) {
            cl->cl_rio =
                rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
            if (cl->cl_rio != NULL)
                cl->cl_qtype = Q_RIO;
        }
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (flags & FARF_RED) {
            cl->cl_red = red_alloc(ifp, 0, 0,
                cl->cl_qlimit * 10/100,
                cl->cl_qlimit * 30/100,
                cl->cl_qflags, pkttime);
            if (cl->cl_red != NULL)
                cl->cl_qtype = Q_RED;
        }
#endif /* CLASSQ_RED */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
        if (flags & FARF_BLUE) {
            cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
            if (cl->cl_blue != NULL)
                cl->cl_qtype = Q_BLUE;
        }
#endif /* CLASSQ_BLUE */
        if (flags & FARF_SFB) {
            if (!(cl->cl_flags & FARF_LAZY))
                cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
                    cl->cl_qlimit, cl->cl_qflags);
            if (cl->cl_sfb != NULL || (cl->cl_flags & FARF_LAZY))
                cl->cl_qtype = Q_SFB;
        }
    }

    if (pktsched_verbose) {
        log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
            "flags=%b\n", if_name(ifp), fairq_style(fif),
            cl->cl_handle, cl->cl_pri, cl->cl_qlimit, flags, FARF_BITS);
    }

    return (cl);

err_buckets:
    if (cl->cl_buckets != NULL)
        _FREE(cl->cl_buckets, M_DEVBUF);
err_ret:
    if (cl != NULL) {
        if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
            if (cl->cl_qtype == Q_RIO)
                rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
            if (cl->cl_qtype == Q_RED)
                red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
            if (cl->cl_qtype == Q_BLUE)
                blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
            if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
                sfb_destroy(cl->cl_sfb);
            cl->cl_qalg.ptr = NULL;
            cl->cl_qtype = Q_DROPTAIL;
            cl->cl_qstate = QS_RUNNING;
        }
        zfree(fairq_cl_zone, cl);
    }
    return (NULL);
}

int
fairq_remove_queue(struct fairq_if *fif, u_int32_t qid)
{
    struct fairq_class *cl;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    if ((cl = fairq_clh_to_clp(fif, qid)) == NULL)
        return (EINVAL);

    return (fairq_class_destroy(fif, cl));
}

static int
fairq_class_destroy(struct fairq_if *fif, struct fairq_class *cl)
{
    struct ifclassq *ifq = fif->fif_ifq;
    int pri;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    if (cl->cl_head)
        fairq_purgeq(fif, cl, 0, NULL, NULL);

    fif->fif_classes[cl->cl_pri] = NULL;
    if (fif->fif_poll_cache == cl)
        fif->fif_poll_cache = NULL;
    if (fif->fif_maxpri == cl->cl_pri) {
        for (pri = cl->cl_pri; pri >= 0; pri--)
            if (fif->fif_classes[pri] != NULL) {
                fif->fif_maxpri = pri;
                break;
            }
        if (pri < 0)
            fif->fif_maxpri = -1;
    }

    if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
        if (cl->cl_qtype == Q_RIO)
            rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (cl->cl_qtype == Q_RED)
            red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (cl->cl_qtype == Q_BLUE)
            blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
        if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
            sfb_destroy(cl->cl_sfb);
        cl->cl_qalg.ptr = NULL;
        cl->cl_qtype = Q_DROPTAIL;
        cl->cl_qstate = QS_RUNNING;
    }

    if (fif->fif_default == cl)
        fif->fif_default = NULL;

    if (pktsched_verbose) {
        log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif),
            cl->cl_handle, cl->cl_pri);
    }

    _FREE(cl->cl_buckets, M_DEVBUF);
    cl->cl_head = NULL;     /* sanity */
    cl->cl_polled = NULL;   /* sanity */
    cl->cl_buckets = NULL;  /* sanity */

    zfree(fairq_cl_zone, cl);

    return (0);
}

int
fairq_enqueue(struct fairq_if *fif, struct fairq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
    struct ifclassq *ifq = fif->fif_ifq;
    int len, ret;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(cl == NULL || cl->cl_fif == fif);

    if (cl == NULL) {
#if PF_ALTQ
        cl = fairq_clh_to_clp(fif, t->pftag_qid);
#else /* !PF_ALTQ */
        cl = fairq_clh_to_clp(fif, 0);
#endif /* !PF_ALTQ */
        if (cl == NULL) {
            cl = fif->fif_default;
            if (cl == NULL) {
                IFCQ_CONVERT_LOCK(ifq);
                m_freem(m);
                return (ENOBUFS);
            }
        }
    }

    cl->cl_flags |= FARF_HAS_PACKETS;
    len = m_pktlen(m);

    ret = fairq_addq(cl, m, t);
    if (ret != 0) {
        if (ret == CLASSQEQ_SUCCESS_FC) {
            /* packet enqueued, return advisory feedback */
            ret = EQFULL;
        } else {
            VERIFY(ret == CLASSQEQ_DROPPED ||
                ret == CLASSQEQ_DROPPED_FC ||
                ret == CLASSQEQ_DROPPED_SP);

            /* packet has been freed in fairq_addq */
            PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
            IFCQ_DROP_ADD(ifq, 1, len);
            switch (ret) {
            case CLASSQEQ_DROPPED:
                return (ENOBUFS);
            case CLASSQEQ_DROPPED_FC:
                return (EQFULL);
            case CLASSQEQ_DROPPED_SP:
                return (EQSUSPENDED);
            }
            /* NOT REACHED */
        }
    }
    IFCQ_INC_LEN(ifq);
    IFCQ_INC_BYTES(ifq, len);

    /* successfully queued. */
    return (ret);
}

/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
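/*
 * The POLL/REMOVE contract above is honored via fif_poll_cache: a POLL
 * records the class it selected there, and the next REMOVE dequeues from
 * that cached class (and from the cached bucket, via cl_polled in
 * fairq_selectq()) instead of re-running the selection, so the packet
 * returned by POLL is the one actually removed.
 */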
struct mbuf *
fairq_dequeue(struct fairq_if *fif, cqdq_op_t op)
{
    struct ifclassq *ifq = fif->fif_ifq;
    struct fairq_class *cl;
    struct fairq_class *best_cl;
    struct mbuf *best_m;
    struct mbuf *m;
    u_int64_t cur_time = read_machclk();
    u_int32_t best_scale;
    u_int32_t scale;
    int pri;
    int hit_limit;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    if (IFCQ_IS_EMPTY(ifq)) {
        /* no packet in the queue */
        return (NULL);
    }

    if (fif->fif_poll_cache && op == CLASSQDQ_REMOVE) {
        best_cl = fif->fif_poll_cache;
        m = fairq_getq(best_cl, cur_time);
        fif->fif_poll_cache = NULL;
        if (m != NULL) {
            IFCQ_DEC_LEN(ifq);
            IFCQ_DEC_BYTES(ifq, m_pktlen(m));
            IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
            PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1, m_pktlen(m));
        }
    } else {
        best_cl = NULL;
        best_m = NULL;
        best_scale = 0xFFFFFFFFU;

        for (pri = fif->fif_maxpri; pri >= 0; pri--) {
            if ((cl = fif->fif_classes[pri]) == NULL)
                continue;
            if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
                continue;
            m = fairq_pollq(cl, cur_time, &hit_limit);
            if (m == NULL) {
                cl->cl_flags &= ~FARF_HAS_PACKETS;
                continue;
            }

            /*
             * We can halt the search immediately if the queue
             * did not hit its bandwidth limit.
             */
            if (hit_limit == 0) {
                best_cl = cl;
                best_m = m;
                break;
            }

            /*
             * Otherwise calculate the scale factor and select
             * the queue with the lowest scale factor.  This
             * apportions any unused bandwidth weighted by
             * the relative bandwidth specification.
             */
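            /*
             * Illustration: a class running at 120% of its spec
             * scores 120 and beats one running at 150%; only the
             * ratio matters, not the absolute rates, so a class
             * with a small spec is not starved for that reason.
             */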
            scale = cl->cl_bw_current * 100 / cl->cl_bandwidth;
            if (scale < best_scale) {
                best_cl = cl;
                best_m = m;
                best_scale = scale;
            }
        }

        if (op == CLASSQDQ_POLL) {
            fif->fif_poll_cache = best_cl;
            m = best_m;
        } else if (best_cl != NULL) {
            m = fairq_getq(best_cl, cur_time);
            if (m != NULL) {
                IFCQ_DEC_LEN(ifq);
                IFCQ_DEC_BYTES(ifq, m_pktlen(m));
                IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
                PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1,
                    m_pktlen(m));
            }
        } else {
            m = NULL;
        }
    }
    return (m);
}

static inline int
fairq_addq(struct fairq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
    struct ifclassq *ifq = cl->cl_fif->fif_ifq;
    fairq_bucket_t *b;
    u_int32_t hash = m->m_pkthdr.pkt_flowid;
    u_int32_t hindex;
    u_int64_t bw;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    /*
     * If the packet doesn't have any keep state put it on the end of
     * our queue.  XXX this can result in out of order delivery.
     */
    if (hash == 0) {
        if (cl->cl_head)
            b = cl->cl_head->prev;
        else
            b = &cl->cl_buckets[0];
    } else {
        hindex = (hash & cl->cl_nbucket_mask);
        b = &cl->cl_buckets[hindex];
    }

    /*
     * Add the bucket to the end of the circular list of active buckets.
     *
     * As a special case we add the bucket to the beginning of the list
     * instead of the end if it was not previously on the list and if
     * its traffic is less than the hog level.
     */
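    /*
     * Placing a newly active, below-hog-rate bucket at the head means
     * fairq_selectq() serves it on the very next dequeue, favoring
     * light, interactive flows with low latency while hogs wait their
     * turn at the tail of the ring.
     */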
    if (b->in_use == 0) {
        b->in_use = 1;
        if (cl->cl_head == NULL) {
            cl->cl_head = b;
            b->next = b;
            b->prev = b;
        } else {
            b->next = cl->cl_head;
            b->prev = cl->cl_head->prev;
            b->prev->next = b;
            b->next->prev = b;

            if (b->bw_delta && cl->cl_hogs_m1) {
                bw = b->bw_bytes * machclk_freq / b->bw_delta;
                if (bw < cl->cl_hogs_m1)
                    cl->cl_head = b;
            }
        }
    }

#if CLASSQ_RIO
    if (cl->cl_qtype == Q_RIO)
        return (rio_addq(cl->cl_rio, &b->queue, m, t));
    else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
    if (cl->cl_qtype == Q_RED)
        return (red_addq(cl->cl_red, &b->queue, m, t));
    else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
    if (cl->cl_qtype == Q_BLUE)
        return (blue_addq(cl->cl_blue, &b->queue, m, t));
    else
#endif /* CLASSQ_BLUE */
    if (cl->cl_qtype == Q_SFB) {
        if (cl->cl_sfb == NULL) {
            struct ifnet *ifp = FAIRQIF_IFP(cl->cl_fif);

            VERIFY(cl->cl_flags & FARF_LAZY);
            IFCQ_CONVERT_LOCK(ifq);

            cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
                cl->cl_qlimit, cl->cl_qflags);
            if (cl->cl_sfb == NULL) {
                /* fall back to droptail */
                cl->cl_qtype = Q_DROPTAIL;
                cl->cl_flags &= ~FARF_SFB;
                cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

                log(LOG_ERR, "%s: %s SFB lazy allocation "
                    "failed for qid=%d pri=%d, falling back "
                    "to DROPTAIL\n", if_name(ifp),
                    fairq_style(cl->cl_fif), cl->cl_handle,
                    cl->cl_pri);
            }
        }
        if (cl->cl_sfb != NULL)
            return (sfb_addq(cl->cl_sfb, &b->queue, m, t));
    } else if (qlen(&b->queue) >= qlimit(&b->queue)) {
        IFCQ_CONVERT_LOCK(ifq);
        m_freem(m);
        return (CLASSQEQ_DROPPED);
    }

#if PF_ECN
    if (cl->cl_flags & FARF_CLEARDSCP)
        write_dsfield(m, t, 0);
#endif /* PF_ECN */

    _addq(&b->queue, m);

    return (0);
}

static inline struct mbuf *
fairq_getq(struct fairq_class *cl, u_int64_t cur_time)
{
    fairq_bucket_t *b;
    struct mbuf *m;

    IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);

    b = fairq_selectq(cl, 0);
    if (b == NULL)
        m = NULL;
#if CLASSQ_RIO
    else if (cl->cl_qtype == Q_RIO)
        m = rio_getq(cl->cl_rio, &b->queue);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
    else if (cl->cl_qtype == Q_RED)
        m = red_getq(cl->cl_red, &b->queue);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
    else if (cl->cl_qtype == Q_BLUE)
        m = blue_getq(cl->cl_blue, &b->queue);
#endif /* CLASSQ_BLUE */
    else if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
        m = sfb_getq(cl->cl_sfb, &b->queue);
    else
        m = _getq(&b->queue);

    /*
     * Calculate the BW change
     */
    if (m != NULL) {
        u_int64_t delta;

        /*
         * Per-class bandwidth calculation
         */
        delta = (cur_time - cl->cl_last_time);
        if (delta > machclk_freq * 8)
            delta = machclk_freq * 8;
        cl->cl_bw_delta += delta;
        cl->cl_bw_bytes += m->m_pkthdr.len;
        cl->cl_last_time = cur_time;
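
        /*
         * Once more than a second's worth of ticks has
         * accumulated, decay both terms by 25%.  The decay leaves
         * the ratio bw_bytes/bw_delta unchanged, so the
         * measurement behaves like a moving average over the
         * recent past while keeping both counters bounded.
         */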
        if (cl->cl_bw_delta > machclk_freq) {
            cl->cl_bw_delta -= cl->cl_bw_delta >> 2;
            cl->cl_bw_bytes -= cl->cl_bw_bytes >> 2;
        }

        /*
         * Per-bucket bandwidth calculation
         */
        delta = (cur_time - b->last_time);
        if (delta > machclk_freq * 8)
            delta = machclk_freq * 8;
        b->bw_delta += delta;
        b->bw_bytes += m->m_pkthdr.len;
        b->last_time = cur_time;
        if (b->bw_delta > machclk_freq) {
            b->bw_delta -= b->bw_delta >> 2;
            b->bw_bytes -= b->bw_bytes >> 2;
        }
    }
    return (m);
}

/*
 * Figure out what the next packet would be if there were no limits.  If
 * this class hits its bandwidth limit *hit_limit is set to non-zero,
 * otherwise it is set to 0.  A non-NULL mbuf is returned either way, as
 * long as the class has a packet queued.
 */
static inline struct mbuf *
fairq_pollq(struct fairq_class *cl, u_int64_t cur_time, int *hit_limit)
{
    fairq_bucket_t *b;
    struct mbuf *m;
    u_int64_t delta;
    u_int64_t bw;

    IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);

    *hit_limit = 0;
    b = fairq_selectq(cl, 1);
    if (b == NULL)
        return (NULL);
    m = qhead(&b->queue);

    /*
     * Did this packet exceed the class bandwidth?  Calculate the
     * bandwidth component of the packet, in bytes per second.
     */
    delta = cur_time - cl->cl_last_time;
    if (delta > machclk_freq * 8)
        delta = machclk_freq * 8;
    cl->cl_bw_delta += delta;
    cl->cl_last_time = cur_time;
    if (cl->cl_bw_delta) {
        bw = cl->cl_bw_bytes * machclk_freq / cl->cl_bw_delta;

        if (bw > cl->cl_bandwidth)
            *hit_limit = 1;
        cl->cl_bw_current = bw;
#if 0
        printf("BW %6lld relative to %6u %d queue 0x%llx\n",
            bw, cl->cl_bandwidth, *hit_limit,
            (uint64_t)VM_KERNEL_ADDRPERM(b));
#endif
    }
    return (m);
}

/*
 * Locate the next queue we want to pull a packet out of.  This code
 * is also responsible for removing empty buckets from the circular list.
 */
static fairq_bucket_t *
fairq_selectq(struct fairq_class *cl, int ispoll)
{
    fairq_bucket_t *b;
    u_int64_t bw;

    IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);

    if (ispoll == 0 && cl->cl_polled) {
        b = cl->cl_polled;
        cl->cl_polled = NULL;
        return (b);
    }

    while ((b = cl->cl_head) != NULL) {
        /*
         * Remove empty queues from consideration
         */
        if (qempty(&b->queue)) {
            b->in_use = 0;
            cl->cl_head = b->next;
            if (cl->cl_head == b) {
                cl->cl_head = NULL;
            } else {
                b->next->prev = b->prev;
                b->prev->next = b->next;
            }
            continue;
        }

        /*
         * Advance the round robin.  Queues with bandwidths less
         * than the hog bandwidth are allowed to burst.
         */
        if (cl->cl_hogs_m1 == 0) {
            cl->cl_head = b->next;
        } else if (b->bw_delta) {
            bw = b->bw_bytes * machclk_freq / b->bw_delta;
            if (bw >= cl->cl_hogs_m1) {
                cl->cl_head = b->next;
            }
            /*
             * XXX TODO -
             */
        }
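
        /*
         * When cl_head is left pointing at a below-hog-rate bucket,
         * the same bucket is selected again on the next pass; this
         * is the burst allowance mentioned above.  A hog has the
         * head advanced past it, so the rest of the ring is served
         * first.
         */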

        /*
         * Return bucket b.
         */
        break;
    }
    if (ispoll)
        cl->cl_polled = b;
    return (b);
}

static void
fairq_purgeq(struct fairq_if *fif, struct fairq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
    struct ifclassq *ifq = fif->fif_ifq;
    u_int32_t _cnt = 0, _len = 0;
    fairq_bucket_t *b;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    /* become regular mutex before freeing mbufs */
    IFCQ_CONVERT_LOCK(ifq);

    while ((b = fairq_selectq(cl, 0)) != NULL) {
        u_int32_t cnt, len, qlen;

        if ((qlen = qlen(&b->queue)) == 0)
            continue;

#if CLASSQ_RIO
        if (cl->cl_qtype == Q_RIO)
            rio_purgeq(cl->cl_rio, &b->queue, flow, &cnt, &len);
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (cl->cl_qtype == Q_RED)
            red_purgeq(cl->cl_red, &b->queue, flow, &cnt, &len);
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (cl->cl_qtype == Q_BLUE)
            blue_purgeq(cl->cl_blue, &b->queue, flow, &cnt, &len);
        else
#endif /* CLASSQ_BLUE */
        if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
            sfb_purgeq(cl->cl_sfb, &b->queue, flow, &cnt, &len);
        else
            _flushq_flow(&b->queue, flow, &cnt, &len);

        if (cnt == 0)
            continue;

        VERIFY(qlen(&b->queue) == (qlen - cnt));

        PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
        IFCQ_DROP_ADD(ifq, cnt, len);

        VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
        IFCQ_LEN(ifq) -= cnt;

        _cnt += cnt;
        _len += len;

        if (pktsched_verbose) {
            log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
                "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
                if_name(FAIRQIF_IFP(fif)), fairq_style(fif),
                cl->cl_handle, cl->cl_pri, qlen, qlen(&b->queue),
                cnt, len, flow);
        }
    }

    if (packets != NULL)
        *packets = _cnt;
    if (bytes != NULL)
        *bytes = _len;
}

static void
fairq_updateq(struct fairq_if *fif, struct fairq_class *cl, cqev_t ev)
{
    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    if (pktsched_verbose) {
        log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
            if_name(FAIRQIF_IFP(fif)), fairq_style(fif),
            cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
    }

#if CLASSQ_RIO
    if (cl->cl_qtype == Q_RIO)
        return (rio_updateq(cl->cl_rio, ev));
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
    if (cl->cl_qtype == Q_RED)
        return (red_updateq(cl->cl_red, ev));
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
    if (cl->cl_qtype == Q_BLUE)
        return (blue_updateq(cl->cl_blue, ev));
#endif /* CLASSQ_BLUE */
    if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
        return (sfb_updateq(cl->cl_sfb, ev));
}

int
fairq_get_class_stats(struct fairq_if *fif, u_int32_t qid,
    struct fairq_classstats *sp)
{
    struct fairq_class *cl;
    fairq_bucket_t *b;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    if ((cl = fairq_clh_to_clp(fif, qid)) == NULL)
        return (EINVAL);

    sp->class_handle = cl->cl_handle;
    sp->priority = cl->cl_pri;
    sp->qlimit = cl->cl_qlimit;
    sp->xmit_cnt = cl->cl_xmitcnt;
    sp->drop_cnt = cl->cl_dropcnt;
    sp->qtype = cl->cl_qtype;
    sp->qstate = cl->cl_qstate;
    sp->qlength = 0;

    if (cl->cl_head) {
        b = cl->cl_head;
        do {
            sp->qlength += qlen(&b->queue);
            b = b->next;
        } while (b != cl->cl_head);
    }

#if CLASSQ_RED
    if (cl->cl_qtype == Q_RED)
        red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
    if (cl->cl_qtype == Q_RIO)
        rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
    if (cl->cl_qtype == Q_BLUE)
        blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
    if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
        sfb_getstats(cl->cl_sfb, &sp->sfb);

    return (0);
}

/* convert a class handle to the corresponding class pointer */
static inline struct fairq_class *
fairq_clh_to_clp(struct fairq_if *fif, u_int32_t chandle)
{
    struct fairq_class *cl;
    int idx;

    IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

    for (idx = fif->fif_maxpri; idx >= 0; idx--)
        if ((cl = fif->fif_classes[idx]) != NULL &&
            cl->cl_handle == chandle)
            return (cl);

    return (NULL);
}

static const char *
fairq_style(struct fairq_if *fif)
{
    return ((fif->fif_flags & FAIRQIFF_ALTQ) ? "ALTQ_FAIRQ" : "FAIRQ");
}

int
fairq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
#pragma unused(ifq, flags)
    return (ENXIO);     /* not yet */
}

int
fairq_teardown_ifclassq(struct ifclassq *ifq)
{
    struct fairq_if *fif = ifq->ifcq_disc;
    int i;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(fif != NULL && ifq->ifcq_type == PKTSCHEDT_FAIRQ);

    (void) fairq_destroy_locked(fif);

    ifq->ifcq_disc = NULL;
    for (i = 0; i < IFCQ_SC_MAX; i++) {
        ifq->ifcq_disc_slots[i].qid = 0;
        ifq->ifcq_disc_slots[i].cl = NULL;
    }

    return (ifclassq_detach(ifq));
}

int
fairq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
    struct fairq_if *fif = ifq->ifcq_disc;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(ifq->ifcq_type == PKTSCHEDT_FAIRQ);

    if (slot >= IFCQ_SC_MAX)
        return (EINVAL);

    return (fairq_get_class_stats(fif, ifq->ifcq_disc_slots[slot].qid,
        &ifqs->ifqs_fairq_stats));
}
#endif /* PKTSCHED_FAIRQ */