/*
 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $OpenBSD: altq_cbq.c,v 1.23 2007/09/13 20:40:02 chl Exp $ */
/* $KAME: altq_cbq.c,v 1.9 2000/12/14 08:12:45 thorpej Exp $ */

/*
 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the SMCC Technology
 *      Development Group at Sun Microsystems, Inc.
 *
 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE.  The software is
 * provided "as is" without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this software.
 */

#if PKTSCHED_CBQ

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_cbq.h>
#include <netinet/in.h>

/*
 * Forward Declarations.
 */
#if 0
static int cbq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *cbq_dequeue_ifclassq(struct ifclassq *, cqdq_op_t);
static int cbq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
#endif
static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
static int cbq_destroy_locked(cbq_state_t *);
static struct rm_class *cbq_clh_to_clp(cbq_state_t *, u_int32_t);
static const char *cbq_style(cbq_state_t *);
static int cbq_clear_interface(cbq_state_t *);
static void cbqrestart(struct ifclassq *);

#define CBQ_ZONE_MAX    32              /* maximum elements in zone */
#define CBQ_ZONE_NAME   "pktsched_cbq"  /* zone name */

static unsigned int cbq_size;           /* size of zone element */
static struct zone *cbq_zone;           /* zone for cbq */

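/*
 * One-time initialization: verify at compile time that the CBQCLF_* class
 * flags mirror their RMCF_* counterparts, create the zone used to allocate
 * cbq_state_t instances, and initialize the underlying rm_class machinery.
 */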
void
cbq_init(void)
{
        _CASSERT(CBQCLF_RED == RMCF_RED);
        _CASSERT(CBQCLF_ECN == RMCF_ECN);
        _CASSERT(CBQCLF_RIO == RMCF_RIO);
        _CASSERT(CBQCLF_FLOWVALVE == RMCF_FLOWVALVE);
        _CASSERT(CBQCLF_CLEARDSCP == RMCF_CLEARDSCP);
        _CASSERT(CBQCLF_WRR == RMCF_WRR);
        _CASSERT(CBQCLF_EFFICIENT == RMCF_EFFICIENT);
        _CASSERT(CBQCLF_BLUE == RMCF_BLUE);
        _CASSERT(CBQCLF_SFB == RMCF_SFB);
        _CASSERT(CBQCLF_FLOWCTL == RMCF_FLOWCTL);
        _CASSERT(CBQCLF_LAZY == RMCF_LAZY);

        cbq_size = sizeof (cbq_state_t);
        cbq_zone = zinit(cbq_size, CBQ_ZONE_MAX * cbq_size, 0, CBQ_ZONE_NAME);
        if (cbq_zone == NULL) {
                panic("%s: failed allocating %s", __func__, CBQ_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(cbq_zone, Z_EXPAND, TRUE);
        zone_change(cbq_zone, Z_CALLERACCT, TRUE);

        rmclass_init();
}

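/*
 * Allocate and zero a cbq_state_t for the given interface.  The scheduler
 * keeps a back-pointer to the interface's send queue (ifclassq) and is
 * flagged as ALTQ-managed when requested by the caller.
 */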
cbq_state_t *
cbq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
        cbq_state_t *cbqp;

        /* allocate and initialize cbq_state_t */
        cbqp = (how == M_WAITOK) ? zalloc(cbq_zone) : zalloc_noblock(cbq_zone);
        if (cbqp == NULL)
                return (NULL);

        bzero(cbqp, cbq_size);
        CALLOUT_INIT(&cbqp->cbq_callout);
        cbqp->cbq_qlen = 0;
        cbqp->ifnp.ifq_ = &ifp->if_snd;         /* keep the ifclassq */
        if (altq)
                cbqp->cbq_flags |= CBQSF_ALTQ;

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s scheduler allocated\n",
                    if_name(ifp), cbq_style(cbqp));
        }

        return (cbqp);
}

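/*
 * Tear down a CBQ instance; thin wrapper that takes the ifclassq lock
 * around cbq_destroy_locked().
 */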
int
cbq_destroy(cbq_state_t *cbqp)
{
        struct ifclassq *ifq = cbqp->ifnp.ifq_;
        int err;

        IFCQ_LOCK(ifq);
        err = cbq_destroy_locked(cbqp);
        IFCQ_UNLOCK(ifq);

        return (err);
}

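/*
 * Destroy every class attached to this scheduler (leaf classes first via
 * cbq_clear_interface(), then any remaining default and root classes) and
 * release the cbq_state_t back to its zone.  Called with the ifclassq
 * lock held.
 */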
static int
cbq_destroy_locked(cbq_state_t *cbqp)
{
        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        (void) cbq_clear_interface(cbqp);

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
        }

        if (cbqp->ifnp.default_)
                cbq_class_destroy(cbqp, cbqp->ifnp.default_);
        if (cbqp->ifnp.root_)
                cbq_class_destroy(cbqp, cbqp->ifnp.root_);

        /* deallocate cbq_state_t */
        zfree(cbq_zone, cbqp);

        return (0);
}

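/*
 * Create a new CBQ class (or the root class) identified by qid.  The
 * parameters are validated, a free slot is picked in the class table,
 * and the class is created via rmc_init() for the root class or
 * rmc_newclass() for everything else.  On success the new class is
 * recorded in the table and optionally returned through *clp.
 */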
int
cbq_add_queue(cbq_state_t *cbqp, u_int32_t qlimit, u_int32_t priority,
    u_int32_t minburst, u_int32_t maxburst, u_int32_t pktsize,
    u_int32_t maxpktsize, u_int32_t ns_per_byte, u_int32_t maxidle, int minidle,
    u_int32_t offtime, u_int32_t flags, u_int32_t parent_qid, u_int32_t qid,
    struct rm_class **clp)
{
#pragma unused(minburst, maxburst, maxpktsize)
        struct rm_class *borrow, *parent;
        struct rm_class *cl;
        int i, error;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        /* Sanitize flags unless internally configured */
        if (cbqp->cbq_flags & CBQSF_ALTQ)
                flags &= CBQCLF_USERFLAGS;

        /*
         * find a free slot in the class table.  if the slot matching
         * the lower bits of qid is free, use this slot.  otherwise,
         * use the first free slot.
         */
        i = qid % CBQ_MAX_CLASSES;
        if (cbqp->cbq_class_tbl[i] != NULL) {
                for (i = 0; i < CBQ_MAX_CLASSES; i++)
                        if (cbqp->cbq_class_tbl[i] == NULL)
                                break;
                if (i == CBQ_MAX_CLASSES)
                        return (EINVAL);
        }

        /* check parameters */
        if (priority >= CBQ_MAXPRI)
                return (EINVAL);

        if (ns_per_byte == 0) {
                log(LOG_ERR, "%s: %s invalid inverse data rate\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                return (EINVAL);
        }

        /* Get pointers to parent and borrow classes. */
        parent = cbq_clh_to_clp(cbqp, parent_qid);
        if (flags & CBQCLF_BORROW)
                borrow = parent;
        else
                borrow = NULL;

        /*
         * A class must borrow from its parent or it can not
         * borrow at all.  Hence, borrow can be null.
         */
        if (parent == NULL && (flags & CBQCLF_ROOTCLASS) == 0) {
                log(LOG_ERR, "%s: %s no parent class!\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                return (EINVAL);
        }

        if ((borrow != parent) && (borrow != NULL)) {
                log(LOG_ERR, "%s: %s borrow class != parent\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                return (EINVAL);
        }

        /*
         * check parameters
         */
        switch (flags & CBQCLF_CLASSMASK) {
        case CBQCLF_ROOTCLASS:
                if (parent != NULL) {
                        log(LOG_ERR, "%s: %s parent exists\n",
                            if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                        return (EINVAL);
                }
                if (cbqp->ifnp.root_) {
                        log(LOG_ERR, "%s: %s root class exists\n",
                            if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                        return (EINVAL);
                }
                break;
        case CBQCLF_DEFCLASS:
                if (cbqp->ifnp.default_) {
                        log(LOG_ERR, "%s: %s default class exists\n",
                            if_name(CBQS_IFP(cbqp)), cbq_style(cbqp));
                        return (EINVAL);
                }
                break;
        case 0:
                break;
        default:
                /* more than two flags bits set */
                log(LOG_ERR, "%s: %s invalid class flags 0x%x\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
                    (flags & CBQCLF_CLASSMASK));
                return (EINVAL);
        }

        /*
         * create a class.  if this is a root class, initialize the
         * interface.
         */
        if ((flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
                error = rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, ns_per_byte,
                    cbqrestart, qid, qlimit, RM_MAXQUEUED, maxidle, minidle,
                    offtime, flags);
                if (error != 0)
                        return (error);
                cl = cbqp->ifnp.root_;
        } else {
                cl = rmc_newclass(priority, &cbqp->ifnp, ns_per_byte,
                    rmc_delay_action, qid, qlimit, parent, borrow, maxidle,
                    minidle, offtime, pktsize, flags);
        }
        if (cl == NULL)
                return (ENOMEM);

        /* return handle to user space. */
        cl->stats_.handle = qid;
        cl->stats_.depth = cl->depth_;

        /* save the allocated class */
        cbqp->cbq_class_tbl[i] = cl;

        if ((flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
                cbqp->ifnp.default_ = cl;

        if (clp != NULL)
                *clp = cl;

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
                    "flags=%b\n", if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
                    qid, priority, qlimit, flags, CBQCLF_BITS);
        }

        return (0);
}

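/*
 * Remove the class identified by qid, provided it has no children, and
 * clear its slot in the class table (along with the root/default
 * pointers if they referred to it).
 */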
int
cbq_remove_queue(cbq_state_t *cbqp, u_int32_t qid)
{
        struct rm_class *cl;
        int i;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        if ((cl = cbq_clh_to_clp(cbqp, qid)) == NULL)
                return (EINVAL);

        /* if we are a parent class, then return an error. */
        if (RMC_IS_A_PARENT_CLASS(cl))
                return (EINVAL);

        /* delete the class */
        rmc_delete_class(&cbqp->ifnp, cl);

        /*
         * free the class handle
         */
        for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                if (cbqp->cbq_class_tbl[i] == cl) {
                        cbqp->cbq_class_tbl[i] = NULL;
                        if (cl == cbqp->ifnp.root_)
                                cbqp->ifnp.root_ = NULL;
                        if (cl == cbqp->ifnp.default_)
                                cbqp->ifnp.default_ = NULL;
                        break;
                }
        }
        return (0);
}

/*
 * int
 * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
 *      function destroys a given traffic class.  Before destroying
 *      the class, all traffic for that class is released.
 */
static int
cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
{
        int i;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
                    if_name(CBQS_IFP(cbqp)), cbq_style(cbqp),
                    cl->stats_.handle, cl->pri_);
        }

        /* delete the class */
        rmc_delete_class(&cbqp->ifnp, cl);

        /*
         * free the class handle
         */
        for (i = 0; i < CBQ_MAX_CLASSES; i++)
                if (cbqp->cbq_class_tbl[i] == cl)
                        cbqp->cbq_class_tbl[i] = NULL;

        if (cl == cbqp->ifnp.root_)
                cbqp->ifnp.root_ = NULL;
        if (cl == cbqp->ifnp.default_)
                cbqp->ifnp.default_ = NULL;

        return (0);
}

/* convert class handle to class pointer */
static struct rm_class *
cbq_clh_to_clp(cbq_state_t *cbqp, u_int32_t chandle)
{
        int i;
        struct rm_class *cl;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        /*
         * first, try optimistically the slot matching the lower bits of
         * the handle.  if it fails, do the linear table search.
         */
        i = chandle % CBQ_MAX_CLASSES;
        if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
            cl->stats_.handle == chandle)
                return (cl);
        for (i = 0; i < CBQ_MAX_CLASSES; i++)
                if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
                    cl->stats_.handle == chandle)
                        return (cl);
        return (NULL);
}

static const char *
cbq_style(cbq_state_t *cbqp)
{
        return ((cbqp->cbq_flags & CBQSF_ALTQ) ? "ALTQ_CBQ" : "CBQ");
}

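/*
 * Destroy every class in the class table, leaf classes first; the sweep
 * is repeated until classes that were parents have lost all of their
 * children and can be destroyed as well.
 */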
static int
cbq_clear_interface(cbq_state_t *cbqp)
{
        int again, i;
        struct rm_class *cl;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        /* clear out the classes now */
        do {
                again = 0;
                for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                        if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
                                if (RMC_IS_A_PARENT_CLASS(cl))
                                        again++;
                                else {
                                        cbq_class_destroy(cbqp, cl);
                                        cbqp->cbq_class_tbl[i] = NULL;
                                        if (cl == cbqp->ifnp.root_)
                                                cbqp->ifnp.root_ = NULL;
                                        if (cl == cbqp->ifnp.default_)
                                                cbqp->ifnp.default_ = NULL;
                                }
                        }
                }
        } while (again);

        return (0);
}

/* copy the stats info in rm_class to class_stats_t */
int
cbq_get_class_stats(cbq_state_t *cbqp, u_int32_t qid, class_stats_t *statsp)
{
        struct rm_class *cl;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        if ((cl = cbq_clh_to_clp(cbqp, qid)) == NULL)
                return (EINVAL);

        statsp->xmit_cnt = cl->stats_.xmit_cnt;
        statsp->drop_cnt = cl->stats_.drop_cnt;
        statsp->over = cl->stats_.over;
        statsp->borrows = cl->stats_.borrows;
        statsp->overactions = cl->stats_.overactions;
        statsp->delays = cl->stats_.delays;

        statsp->depth = cl->depth_;
        statsp->priority = cl->pri_;
        statsp->maxidle = cl->maxidle_;
        statsp->minidle = cl->minidle_;
        statsp->offtime = cl->offtime_;
        statsp->qmax = qlimit(&cl->q_);
        statsp->ns_per_byte = cl->ns_per_byte_;
        statsp->wrr_allot = cl->w_allotment_;
        statsp->qcnt = qlen(&cl->q_);
        statsp->avgidle = cl->avgidle_;

        statsp->qtype = qtype(&cl->q_);
        statsp->qstate = qstate(&cl->q_);
#if CLASSQ_RED
        if (q_is_red(&cl->q_))
                red_getstats(cl->red_, &statsp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
        if (q_is_rio(&cl->q_))
                rio_getstats(cl->rio_, &statsp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->q_))
                blue_getstats(cl->blue_, &statsp->blue);
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->q_) && cl->sfb_ != NULL)
                sfb_getstats(cl->sfb_, &statsp->sfb);

        return (0);
}

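/*
 * Enqueue a packet on the given class (falling back to the class named by
 * the pf tag, or to the default class).  On success the scheduler and
 * ifclassq queue lengths are bumped; rm_class drop and flow-control
 * results are mapped onto errno-style return values for the caller.
 */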
int
cbq_enqueue(cbq_state_t *cbqp, struct rm_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
        struct ifclassq *ifq = cbqp->ifnp.ifq_;
        int len, ret;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        /* grab class set by classifier */
        if (!(m->m_flags & M_PKTHDR)) {
                /* should not happen */
                log(LOG_ERR, "%s: packet does not have pkthdr\n",
                    if_name(ifq->ifcq_ifp));
                IFCQ_CONVERT_LOCK(ifq);
                m_freem(m);
                return (ENOBUFS);
        }

        if (cl == NULL) {
#if PF_ALTQ
                cl = cbq_clh_to_clp(cbqp, t->pftag_qid);
#else /* !PF_ALTQ */
                cl = cbq_clh_to_clp(cbqp, 0);
#endif /* !PF_ALTQ */
                if (cl == NULL) {
                        cl = cbqp->ifnp.default_;
                        if (cl == NULL) {
                                IFCQ_CONVERT_LOCK(ifq);
                                m_freem(m);
                                return (ENOBUFS);
                        }
                }
        }

        len = m_pktlen(m);

        ret = rmc_queue_packet(cl, m, t);
        if (ret != 0) {
                if (ret == CLASSQEQ_SUCCESS_FC) {
                        /* packet enqueued, return advisory feedback */
                        ret = EQFULL;
                } else {
                        VERIFY(ret == CLASSQEQ_DROPPED ||
                            ret == CLASSQEQ_DROPPED_FC ||
                            ret == CLASSQEQ_DROPPED_SP);
                        /* packet has been freed in rmc_queue_packet */
                        PKTCNTR_ADD(&cl->stats_.drop_cnt, 1, len);
                        IFCQ_DROP_ADD(ifq, 1, len);
                        switch (ret) {
                        case CLASSQEQ_DROPPED:
                                return (ENOBUFS);
                        case CLASSQEQ_DROPPED_FC:
                                return (EQFULL);
                        case CLASSQEQ_DROPPED_SP:
                                return (EQSUSPENDED);
                        }
                        /* NOT REACHED */
                }
        }

        /* successfully queued. */
        ++cbqp->cbq_qlen;
        IFCQ_INC_LEN(ifq);

        return (ret);
}

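/*
 * Dequeue the next packet chosen by the rm_class machinery; when op is
 * CLASSQDQ_REMOVE the packet is actually removed and the queue lengths,
 * transmit counters and class utilization estimates are updated.
 */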
struct mbuf *
cbq_dequeue(cbq_state_t *cbqp, cqdq_op_t op)
{
        struct ifclassq *ifq = cbqp->ifnp.ifq_;
        struct mbuf *m;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        m = rmc_dequeue_next(&cbqp->ifnp, op);

        if (m && op == CLASSQDQ_REMOVE) {
                --cbqp->cbq_qlen;       /* decrement # of packets in cbq */
                IFCQ_DEC_LEN(ifq);
                IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));

                /* Update the class. */
                rmc_update_class_util(&cbqp->ifnp);
        }
        return (m);
}

/*
 * void
 * cbqrestart(struct ifclassq *) - Restart sending of data.
 * called from rmc_restart via timeout after waking up
 * a suspended class.
 *      Returns:        NONE
 */

static void
cbqrestart(struct ifclassq *ifq)
{
        u_int32_t qlen;

        IFCQ_LOCK(ifq);
        qlen = IFCQ_LEN(ifq);
        IFCQ_UNLOCK(ifq);

        if (qlen > 0)
                ifnet_start(ifq->ifcq_ifp);
}

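/* Drop every packet queued in every class of this scheduler. */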
void
cbq_purge(cbq_state_t *cbqp)
{
        struct rm_class *cl;
        int i;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
                        if (!qempty(&cl->q_) && pktsched_verbose) {
                                log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
                                    "qlen=%d\n", if_name(CBQS_IFP(cbqp)),
                                    cbq_style(cbqp), cl->stats_.handle,
                                    cl->pri_, qlen(&cl->q_));
                        }
                        rmc_dropall(cl);
                }
        }
}

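/* Propagate an ifclassq event to every class via rmc_updateq(). */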
void
cbq_event(cbq_state_t *cbqp, cqev_t ev)
{
        struct rm_class *cl;
        int i;

        IFCQ_LOCK_ASSERT_HELD(cbqp->ifnp.ifq_);

        for (i = 0; i < CBQ_MAX_CLASSES; i++) {
                if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
                        if (pktsched_verbose) {
                                log(LOG_DEBUG, "%s: %s update qid=%d pri=%d "
                                    "event=%s\n", if_name(CBQS_IFP(cbqp)),
                                    cbq_style(cbqp), cl->stats_.handle,
                                    cl->pri_, ifclassq_ev2str(ev));
                        }
                        rmc_updateq(cl, ev);
                }
        }
}

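/*
 * Native (non-ALTQ) attachment of CBQ to an ifclassq is not implemented;
 * this stub simply reports ENXIO.
 */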
int
cbq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
#pragma unused(ifq, flags)
        return (ENXIO);         /* not yet */
}

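/*
 * Detach the CBQ discipline from an ifclassq: destroy the scheduler
 * state, clear the per-service-class slot mappings, and detach the
 * classq from the discipline.
 */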
int
cbq_teardown_ifclassq(struct ifclassq *ifq)
{
        cbq_state_t *cbqp = ifq->ifcq_disc;
        int i;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(cbqp != NULL && ifq->ifcq_type == PKTSCHEDT_CBQ);

        (void) cbq_destroy_locked(cbqp);

        ifq->ifcq_disc = NULL;
        for (i = 0; i < IFCQ_SC_MAX; i++) {
                ifq->ifcq_disc_slots[i].qid = 0;
                ifq->ifcq_disc_slots[i].cl = NULL;
        }

        return (ifclassq_detach(ifq));
}

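/* Fetch statistics for the class bound to the given service-class slot. */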
int
cbq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
        cbq_state_t *cbqp = ifq->ifcq_disc;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(ifq->ifcq_type == PKTSCHEDT_CBQ);

        if (slot >= IFCQ_SC_MAX)
                return (EINVAL);

        return (cbq_get_class_stats(cbqp, ifq->ifcq_disc_slots[slot].qid,
            &ifqs->ifqs_cbq_stats));
}
#endif /* PKTSCHED_CBQ */