/*
 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $OpenBSD: altq_cbq.c,v 1.23 2007/09/13 20:40:02 chl Exp $ */
/* $KAME: altq_cbq.c,v 1.9 2000/12/14 08:12:45 thorpej Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *      promote products derived from this software without specific prior
 *      written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#if PKTSCHED_CBQ

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_cbq.h>
#include <netinet/in.h>

/*
 * Forward Declarations.
 */
#if 0
static int cbq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *cbq_dequeue_ifclassq(struct ifclassq *, cqdq_op_t);
static int cbq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
#endif
static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
static int cbq_destroy_locked(cbq_state_t *);
static struct rm_class *cbq_clh_to_clp(cbq_state_t *, u_int32_t);
static const char *cbq_style(cbq_state_t *);
static int cbq_clear_interface(cbq_state_t *);
static void cbqrestart(struct ifclassq *);

#define	CBQ_ZONE_MAX	32		/* maximum elements in zone */
#define	CBQ_ZONE_NAME	"pktsched_cbq"	/* zone name */

static unsigned int cbq_size;		/* size of zone element */
static struct zone *cbq_zone;		/* zone for cbq */

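/*
 * Verify that the CBQ class flags map 1:1 onto the RM (resource
 * management) class flags, then set up the zone used to allocate
 * cbq_state_t instances and initialize the RM class machinery.
 */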
void
cbq_init(void)
{
	_CASSERT(CBQCLF_RED == RMCF_RED);
	_CASSERT(CBQCLF_ECN == RMCF_ECN);
	_CASSERT(CBQCLF_RIO == RMCF_RIO);
	_CASSERT(CBQCLF_FLOWVALVE == RMCF_FLOWVALVE);
	_CASSERT(CBQCLF_CLEARDSCP == RMCF_CLEARDSCP);
	_CASSERT(CBQCLF_WRR == RMCF_WRR);
	_CASSERT(CBQCLF_EFFICIENT == RMCF_EFFICIENT);
	_CASSERT(CBQCLF_BLUE == RMCF_BLUE);
	_CASSERT(CBQCLF_SFB == RMCF_SFB);
	_CASSERT(CBQCLF_FLOWCTL == RMCF_FLOWCTL);
	_CASSERT(CBQCLF_LAZY == RMCF_LAZY);

	cbq_size = sizeof (cbq_state_t);
	cbq_zone = zinit(cbq_size, CBQ_ZONE_MAX * cbq_size, 0, CBQ_ZONE_NAME);
	if (cbq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, CBQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(cbq_zone, Z_EXPAND, TRUE);
	zone_change(cbq_zone, Z_CALLERACCT, TRUE);

	rmclass_init();
}

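/*
 * Allocate a zeroed cbq_state_t for the given interface and record the
 * interface's send queue; "how" selects blocking vs. non-blocking zone
 * allocation, and "altq" marks the instance as ALTQ-style.
 */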
cbq_state_t *
cbq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
	cbq_state_t *cbqp;

	/* allocate and initialize cbq_state_t */
	cbqp = (how == M_WAITOK) ? zalloc(cbq_zone) : zalloc_noblock(cbq_zone);
	if (cbqp == NULL)
		return (NULL);

	bzero(cbqp, cbq_size);
	CALLOUT_INIT(&cbqp->cbq_callout);
	cbqp->cbq_qlen = 0;
	cbqp->ifnp.ifq_ = &ifp->if_snd;		/* keep the ifclassq */
	if (altq)
		cbqp->cbq_flags |= CBQSF_ALTQ;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
		    if_name(ifp), cbq_style(cbqp));
	}

	return (cbqp);
}

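/*
 * Tear down a CBQ instance; takes the ifclassq lock and defers the
 * actual work to cbq_destroy_locked().
 */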
int
cbq_destroy(cbq_state_t *cbqp)
{
	struct ifclassq *ifq = cbqp->ifnp.ifq_;
	int err;

	IFCQ_LOCK(ifq);
	err = cbq_destroy_locked(cbqp);
	IFCQ_UNLOCK(ifq);

	return (err);
}

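/*
 * Destroy all leaf classes, then the default and root classes, and
 * finally free the cbq_state_t itself; the caller must hold the
 * ifclassq lock.
 */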
static int
cbq_destroy_locked(cbq_state_t *cbqp)
{
	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	(void) cbq_clear_interface(cbqp);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
	}

	if (cbqp->ifnp.default_)
		cbq_class_destroy(cbqp, cbqp->ifnp.default_);
	if (cbqp->ifnp.root_)
		cbq_class_destroy(cbqp, cbqp->ifnp.root_);

	/* deallocate cbq_state_t */
	zfree(cbq_zone, cbqp);

	return (0);
}

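/*
 * Create a CBQ class with the given rate (expressed as ns_per_byte),
 * priority, queue limit and link-sharing parameters, and install it in
 * the class table.  The root class initializes the RM machinery for
 * the interface; a class flagged CBQCLF_DEFCLASS becomes the default
 * class.  On success the new class is returned via *clp.
 */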
int
cbq_add_queue(cbq_state_t *cbqp, u_int32_t qlimit, u_int32_t priority,
    u_int32_t minburst, u_int32_t maxburst, u_int32_t pktsize,
    u_int32_t maxpktsize, u_int32_t ns_per_byte, u_int32_t maxidle, int minidle,
    u_int32_t offtime, u_int32_t flags, u_int32_t parent_qid, u_int32_t qid,
    struct rm_class **clp)
{
#pragma unused(minburst, maxburst, maxpktsize)
	struct rm_class *borrow, *parent;
	struct rm_class *cl;
	int i, error;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	/* Sanitize flags unless internally configured */
	if (cbqp->cbq_flags & CBQSF_ALTQ)
		flags &= CBQCLF_USERFLAGS;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % CBQ_MAX_CLASSES;
	if (cbqp->cbq_class_tbl[i] != NULL) {
		for (i = 0; i < CBQ_MAX_CLASSES; i++)
			if (cbqp->cbq_class_tbl[i] == NULL)
				break;
		if (i == CBQ_MAX_CLASSES)
			return (EINVAL);
	}

	/* check parameters */
	if (priority >= CBQ_MAXPRI)
		return (EINVAL);

	if (ns_per_byte == 0) {
		log(LOG_ERR, "%s: %s invalid inverse data rate\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
		return (EINVAL);
	}

	/* Get pointers to parent and borrow classes. */
	parent = cbq_clh_to_clp(cbqp, parent_qid);
	if (flags & CBQCLF_BORROW)
		borrow = parent;
	else
		borrow = NULL;

	/*
	 * A class must borrow from its parent or it can not
	 * borrow at all.  Hence, borrow can be null.
	 */
	if (parent == NULL && (flags & CBQCLF_ROOTCLASS) == 0) {
		log(LOG_ERR, "%s: %s no parent class!\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
		return (EINVAL);
	}

	if ((borrow != parent) && (borrow != NULL)) {
		log(LOG_ERR, "%s: %s borrow class != parent\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
		return (EINVAL);
	}

	/*
	 * check parameters
	 */
	switch (flags & CBQCLF_CLASSMASK) {
	case CBQCLF_ROOTCLASS:
		if (parent != NULL) {
			log(LOG_ERR, "%s: %s parent exists\n",
			    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
			return (EINVAL);
		}
		if (cbqp->ifnp.root_) {
			log(LOG_ERR, "%s: %s root class exists\n",
			    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
			return (EINVAL);
		}
		break;
	case CBQCLF_DEFCLASS:
		if (cbqp->ifnp.default_) {
			log(LOG_ERR, "%s: %s default class exists\n",
			    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
			return (EINVAL);
		}
		break;
	case 0:
		break;
	default:
		/* more than two flags bits set */
		log(LOG_ERR, "%s: %s invalid class flags 0x%x\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
		    (flags & CBQCLF_CLASSMASK));
		return (EINVAL);
	}

	/*
	 * create a class.  if this is a root class, initialize the
	 * interface.
	 */
	if ((flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
		error = rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, ns_per_byte,
		    cbqrestart, qid, qlimit, RM_MAXQUEUED, maxidle, minidle,
		    offtime, flags);
		if (error != 0)
			return (error);
		cl = cbqp->ifnp.root_;
	} else {
		cl = rmc_newclass(priority, &cbqp->ifnp, ns_per_byte,
		    rmc_delay_action, qid, qlimit, parent, borrow, maxidle,
		    minidle, offtime, pktsize, flags);
	}
	if (cl == NULL)
		return (ENOMEM);

	/* return handle to user space. */
	cl->stats_.handle = qid;
	cl->stats_.depth = cl->depth_;

	/* save the allocated class */
	cbqp->cbq_class_tbl[i] = cl;

	if ((flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
		cbqp->ifnp.default_ = cl;

	if (clp != NULL)
		*clp = cl;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
		    qid, priority, qlimit, flags, CBQCLF_BITS);
	}

	return (0);
}

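/*
 * Remove a leaf class identified by its handle; parent classes cannot
 * be removed this way.  The class is deleted from the RM layer and its
 * slot in the class table (and any root/default reference) is cleared.
 */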
int
cbq_remove_queue(cbq_state_t *cbqp, u_int32_t qid)
{
	struct rm_class *cl;
	int i;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	if ((cl = cbq_clh_to_clp(cbqp, qid)) == NULL)
		return (EINVAL);

	/* if we are a parent class, then return an error. */
	if (RMC_IS_A_PARENT_CLASS(cl))
		return (EINVAL);

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if (cbqp->cbq_class_tbl[i] == cl) {
			cbqp->cbq_class_tbl[i] = NULL;
			if (cl == cbqp->ifnp.root_)
				cbqp->ifnp.root_ = NULL;
			if (cl == cbqp->ifnp.default_)
				cbqp->ifnp.default_ = NULL;
			break;
		}
	}
	return (0);
}

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *	function destroys a given traffic class.  Before destroying
 *	the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
	int i;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
		    cl->stats_.handle, cl->pri_);
	}

	/* delete the class */
	rmc_delete_class(&cbqp->ifnp, cl);

	/*
	 * free the class handle
	 */
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if (cbqp->cbq_class_tbl[i] == cl)
			cbqp->cbq_class_tbl[i] = NULL;

	if (cl == cbqp->ifnp.root_)
		cbqp->ifnp.root_ = NULL;
	if (cl == cbqp->ifnp.default_)
		cbqp->ifnp.default_ = NULL;

	return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
cbq_clh_to_clp(cbq_state_t *cbqp, u_int32_t chandle)
{
	int i;
	struct rm_class *cl;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % CBQ_MAX_CLASSES;
	if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
	    cl->stats_.handle == chandle)
		return (cl);
	for (i = 0; i < CBQ_MAX_CLASSES; i++)
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
		    cl->stats_.handle == chandle)
			return (cl);
	return (NULL);
}

static const char *
cbq_style(cbq_state_t *cbqp)
{
	return ((cbqp->cbq_flags & CBQSF_ALTQ) ? "ALTQ_CBQ" : "CBQ");
}

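/*
 * Destroy every class attached to the interface.  Parent classes are
 * skipped on each pass and the loop repeats until no parents remain,
 * so children are always destroyed before their parents.
 */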
static int
cbq_clear_interface(cbq_state_t *cbqp)
{
	int again, i;
	struct rm_class *cl;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	/* clear out the classes now */
	do {
		again = 0;
		for (i = 0; i < CBQ_MAX_CLASSES; i++) {
			if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
				if (RMC_IS_A_PARENT_CLASS(cl))
					again++;
				else {
					cbq_class_destroy(cbqp, cl);
					cbqp->cbq_class_tbl[i] = NULL;
					if (cl == cbqp->ifnp.root_)
						cbqp->ifnp.root_ = NULL;
					if (cl == cbqp->ifnp.default_)
						cbqp->ifnp.default_ = NULL;
				}
			}
		}
	} while (again);

	return (0);
}

/* copy the stats info in rm_class to class_stats_t */
int
cbq_get_class_stats(cbq_state_t *cbqp, u_int32_t qid, class_stats_t *statsp)
{
	struct rm_class *cl;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	if ((cl = cbq_clh_to_clp(cbqp, qid)) == NULL)
		return (EINVAL);

	statsp->xmit_cnt = cl->stats_.xmit_cnt;
	statsp->drop_cnt = cl->stats_.drop_cnt;
	statsp->over = cl->stats_.over;
	statsp->borrows = cl->stats_.borrows;
	statsp->overactions = cl->stats_.overactions;
	statsp->delays = cl->stats_.delays;

	statsp->depth = cl->depth_;
	statsp->priority = cl->pri_;
	statsp->maxidle = cl->maxidle_;
	statsp->minidle = cl->minidle_;
	statsp->offtime = cl->offtime_;
	statsp->qmax = qlimit(&cl->q_);
	statsp->ns_per_byte = cl->ns_per_byte_;
	statsp->wrr_allot = cl->w_allotment_;
	statsp->qcnt = qlen(&cl->q_);
	statsp->avgidle = cl->avgidle_;

	statsp->qtype = qtype(&cl->q_);
	statsp->qstate = qstate(&cl->q_);
#if CLASSQ_RED
	if (q_is_red(&cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->q_))
		rio_getstats(cl->rio_, &statsp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->q_))
		blue_getstats(cl->blue_, &statsp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->q_) && cl->sfb_ != NULL)
		sfb_getstats(cl->sfb_, &statsp->sfb);

	return (0);
}

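/*
 * Enqueue a packet on the class selected by the classifier, or on the
 * default class if no class matches.  Packets rejected by
 * rmc_queue_packet() have already been freed there; the return value
 * maps the drop reason to ENOBUFS, EQFULL or EQSUSPENDED.
 */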
int
cbq_enqueue(cbq_state_t *cbqp, struct rm_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = cbqp->ifnp.ifq_;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/* grab class set by classifier */
	if (!(m->m_flags & M_PKTHDR)) {
		/* should not happen */
		log(LOG_ERR, "%s: packet does not have pkthdr\n",
		    if_name(ifq->ifcq_ifp));
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (ENOBUFS);
	}

	if (cl == NULL) {
		cl = cbq_clh_to_clp(cbqp, t->pftag_qid);
		if (cl == NULL) {
			cl = cbqp->ifnp.default_;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}

	len = m_pktlen(m);

	ret = rmc_queue_packet(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in rmc_queue_packet */
			PKTCNTR_ADD(&cl->stats_.drop_cnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	IFCQ_INC_LEN(ifq);

	return (ret);
}

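/*
 * Hand the next packet chosen by the RM scheduler to the caller.  For
 * CLASSQDQ_REMOVE the queue counters are updated and the class
 * utilization is recomputed; other ops (e.g. poll) leave the state
 * untouched.
 */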
struct mbuf *
cbq_dequeue(cbq_state_t *cbqp, cqdq_op_t op)
{
	struct ifclassq *ifq = cbqp->ifnp.ifq_;
	struct mbuf *m;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	m = rmc_dequeue_next(&cbqp->ifnp, op);

	if (m && op == CLASSQDQ_REMOVE) {
		--cbqp->cbq_qlen;	/* decrement # of packets in cbq */
		IFCQ_DEC_LEN(ifq);
		IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));

		/* Update the class. */
		rmc_update_class_util(&cbqp->ifnp);
	}
	return (m);
}

/*
 * void
 * cbqrestart(struct ifclassq *) - Restart sending of data.
 *	called from rmc_restart via timeout after waking up
 *	a suspended class.
 *	Returns:	NONE
 */

static void
cbqrestart(struct ifclassq *ifq)
{
	u_int32_t qlen;

	IFCQ_LOCK(ifq);
	qlen = IFCQ_LEN(ifq);
	IFCQ_UNLOCK(ifq);

	if (qlen > 0)
		ifnet_start(ifq->ifcq_ifp);
}

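/*
 * Drop all packets queued on every class of this scheduler.
 */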
void
cbq_purge(cbq_state_t *cbqp)
{
	struct rm_class *cl;
	int i;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
			if (!qempty(&cl->q_) && pktsched_verbose) {
				log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
				    "qlen=%d\n", if_name(CBQS_IFP(cbqp)),
				    cbq_style(cbqp), cl->stats_.handle,
				    cl->pri_, qlen(&cl->q_));
			}
			rmc_dropall(cl);
		}
	}
}

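/*
 * Propagate an ifclassq event to each class by calling rmc_updateq().
 */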
void
cbq_event(cbq_state_t *cbqp, cqev_t ev)
{
	struct rm_class *cl;
	int i;

	IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

	for (i = 0; i < CBQ_MAX_CLASSES; i++) {
		if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
			if (pktsched_verbose) {
				log(LOG_DEBUG, "%s: %s update qid=%d pri=%d "
				    "event=%s\n", if_name(CBQS_IFP(cbqp)),
				    cbq_style(cbqp), cl->stats_.handle,
				    cl->pri_, ifclassq_ev2str(ev));
			}
			rmc_updateq(cl, ev);
		}
	}
}

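/*
 * Attach hook for running CBQ as the ifclassq scheduler; not
 * implemented yet, so this always returns ENXIO.
 */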
int
cqb_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
#pragma unused(ifq, flags)
	return (ENXIO);		/* not yet */
}

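/*
 * Detach CBQ from the interface class queue: destroy the scheduler
 * state, clear the per-slot qid/class mappings and detach the
 * discipline from the ifclassq.
 */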
int
cbq_teardown_ifclassq(struct ifclassq *ifq)
{
	cbq_state_t *cbqp = ifq->ifcq_disc;
	int i;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cbqp != NULL && ifq->ifcq_type == PKTSCHEDT_CBQ);

	(void) cbq_destroy_locked(cbqp);

	ifq->ifcq_disc = NULL;
	for (i = 0; i < IFCQ_SC_MAX; i++) {
		ifq->ifcq_disc_slots[i].qid = 0;
		ifq->ifcq_disc_slots[i].cl = NULL;
	}

	return (ifclassq_detach(ifq));
}

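/*
 * Export the statistics of the class mapped to the given ifclassq slot
 * into the caller-supplied if_ifclassq_stats structure.
 */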
int
cbq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
	cbq_state_t *cbqp = ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_CBQ);

	if (slot >= IFCQ_SC_MAX)
		return (EINVAL);

	return (cbq_get_class_stats(cbqp, ifq->ifcq_disc_slots[slot].qid,
	    &ifqs->ifqs_cbq_stats));
}
#endif /* PKTSCHED_CBQ */