/*
 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $OpenBSD: altq_priq.c,v 1.21 2007/09/13 20:40:02 chl Exp $ */
/* $KAME: altq_priq.c,v 1.1 2000/10/18 09:15:23 kjc Exp $ */

/*
 * Copyright (C) 2000-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * priority queue
 */
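
/*
 * Editorial overview: PRIQ implements strict-priority scheduling.  Each
 * class owns a single queue, and a bitmap of active (non-empty) classes
 * lets the dequeue path locate the highest eligible priority with one
 * find-last-set operation.  A minimal userland sketch of the selection
 * step, assuming bit N of the bitmap means "the class at priority N has
 * packets queued" (illustrative only, not kernel code):
 *
 *	#include <strings.h>
 *
 *	static int
 *	highest_active_pri(unsigned int bitmap)
 *	{
 *		if (bitmap == 0)
 *			return (-1);	// no active class
 *		// fls() is 1-based; subtract 1 for a 0-based priority
 *		return (fls(bitmap) - 1);
 *	}
 *
 * priq_dequeue() below performs the same computation with
 * pktsched_fls() on pif->pif_bitmap.
 */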

#if PKTSCHED_PRIQ

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_priq.h>
#include <netinet/in.h>

/*
 * function prototypes
 */
static int priq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *priq_dequeue_ifclassq(struct ifclassq *, cqdq_op_t);
static int priq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
static int priq_clear_interface(struct priq_if *);
static struct priq_class *priq_class_create(struct priq_if *, int, u_int32_t,
    int, u_int32_t);
static int priq_class_destroy(struct priq_if *, struct priq_class *);
static int priq_destroy_locked(struct priq_if *);
static inline int priq_addq(struct priq_class *, struct mbuf *,
    struct pf_mtag *);
static inline struct mbuf *priq_getq(struct priq_class *);
static inline struct mbuf *priq_pollq(struct priq_class *);
static void priq_purgeq(struct priq_if *, struct priq_class *, u_int32_t,
    u_int32_t *, u_int32_t *);
static void priq_purge_sc(struct priq_if *, cqrq_purge_sc_t *);
static void priq_updateq(struct priq_if *, struct priq_class *, cqev_t);
static int priq_throttle(struct priq_if *, cqrq_throttle_t *);
static int priq_resumeq(struct priq_if *, struct priq_class *);
static int priq_suspendq(struct priq_if *, struct priq_class *);
static inline struct priq_class *priq_clh_to_clp(struct priq_if *, u_int32_t);
static const char *priq_style(struct priq_if *);

#define	PRIQ_ZONE_MAX	32		/* maximum elements in zone */
#define	PRIQ_ZONE_NAME	"pktsched_priq"	/* zone name */

static unsigned int priq_size;		/* size of zone element */
static struct zone *priq_zone;		/* zone for priq */

#define	PRIQ_CL_ZONE_MAX	32	/* maximum elements in zone */
#define	PRIQ_CL_ZONE_NAME	"pktsched_priq_cl" /* zone name */

static unsigned int priq_cl_size;	/* size of zone element */
static struct zone *priq_cl_zone;	/* zone for priq_class */

void
priq_init(void)
{
	priq_size = sizeof (struct priq_if);
	priq_zone = zinit(priq_size, PRIQ_ZONE_MAX * priq_size,
	    0, PRIQ_ZONE_NAME);
	if (priq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_zone, Z_EXPAND, TRUE);
	zone_change(priq_zone, Z_CALLERACCT, TRUE);

	priq_cl_size = sizeof (struct priq_class);
	priq_cl_zone = zinit(priq_cl_size, PRIQ_CL_ZONE_MAX * priq_cl_size,
	    0, PRIQ_CL_ZONE_NAME);
	if (priq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_cl_zone, Z_EXPAND, TRUE);
	zone_change(priq_cl_zone, Z_CALLERACCT, TRUE);
}

struct priq_if *
priq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
	struct priq_if *pif;

	pif = (how == M_WAITOK) ? zalloc(priq_zone) : zalloc_noblock(priq_zone);
	if (pif == NULL)
		return (NULL);

	bzero(pif, priq_size);
	pif->pif_maxpri = -1;
	pif->pif_ifq = &ifp->if_snd;
	if (altq)
		pif->pif_flags |= PRIQIFF_ALTQ;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
		    if_name(ifp), priq_style(pif));
	}

	return (pif);
}

int
priq_destroy(struct priq_if *pif)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err;

	IFCQ_LOCK(ifq);
	err = priq_destroy_locked(pif);
	IFCQ_UNLOCK(ifq);

	return (err);
}

static int
priq_destroy_locked(struct priq_if *pif)
{
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	(void) priq_clear_interface(pif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
	}

	zfree(priq_zone, pif);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
priq_clear_interface(struct priq_if *pif)
{
	struct priq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* clear out the classes */
	for (pri = 0; pri <= pif->pif_maxpri; pri++)
		if ((cl = pif->pif_classes[pri]) != NULL)
			priq_class_destroy(pif, cl);

	return (0);
}

/* discard all the queued packets on the interface */
void
priq_purge(struct priq_if *pif)
{
	struct priq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	for (pri = 0; pri <= pif->pif_maxpri; pri++) {
		if ((cl = pif->pif_classes[pri]) != NULL && !qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
	}
#if !PF_ALTQ
	/*
	 * This assertion is safe to be made only when PF_ALTQ is not
	 * configured; otherwise, IFCQ_LEN represents the sum of the
	 * packets managed by ifcq_disc and altq_disc instances, which
	 * is possible when transitioning between the two.
	 */
	VERIFY(IFCQ_LEN(pif->pif_ifq) == 0);
#endif /* !PF_ALTQ */
}

static void
priq_purge_sc(struct priq_if *pif, cqrq_purge_sc_t *pr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc));
	VERIFY(pr->flow != 0);

	if (pr->sc != MBUF_SC_UNSPEC) {
		i = MBUF_SCIDX(pr->sc);
		VERIFY(i < IFCQ_SC_MAX);

		priq_purgeq(pif, ifq->ifcq_disc_slots[i].cl,
		    pr->flow, &pr->packets, &pr->bytes);
	} else {
		u_int32_t cnt, len;

		pr->packets = 0;
		pr->bytes = 0;

		for (i = 0; i < IFCQ_SC_MAX; i++) {
			priq_purgeq(pif, ifq->ifcq_disc_slots[i].cl,
			    pr->flow, &cnt, &len);
			pr->packets += cnt;
			pr->bytes += len;
		}
	}
}

void
priq_event(struct priq_if *pif, cqev_t ev)
{
	struct priq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	for (pri = 0; pri <= pif->pif_maxpri; pri++)
		if ((cl = pif->pif_classes[pri]) != NULL)
			priq_updateq(pif, cl, ev);
}

int
priq_add_queue(struct priq_if *pif, int priority, u_int32_t qlimit,
    int flags, u_int32_t qid, struct priq_class **clp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* check parameters */
	if (priority >= PRIQ_MAXPRI)
		return (EINVAL);
	if (pif->pif_classes[priority] != NULL)
		return (EBUSY);
	if (priq_clh_to_clp(pif, qid) != NULL)
		return (EBUSY);

	cl = priq_class_create(pif, priority, qlimit, flags, qid);
	if (cl == NULL)
		return (ENOMEM);

	if (clp != NULL)
		*clp = cl;

	return (0);
}
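
/*
 * Usage sketch (editorial): this mirrors the canonical call sites in
 * priq_setup_ifclassq() below; the IFCQ lock must be held as asserted
 * above.  Illustrative only:
 *
 *	struct priq_class *cl;
 *	int err;
 *
 *	if ((err = priq_add_queue(pif, 2, maxlen,
 *	    qflags | PRCF_DEFAULTCLASS, SCIDX_BE, &cl)) != 0)
 *		goto cleanup;
 */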

static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* Sanitize flags unless internally configured */
	if (pif->pif_flags & PRIQIFF_ALTQ)
		flags &= PRCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & PRCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & PRCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & PRCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RED &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RIO &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_BLUE &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
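
	/*
	 * Editorial note: the test above rejects any combination with
	 * more than one of RED|RIO|BLUE|SFB set.  Assuming each PRCF_*
	 * queueing flag is a distinct single bit (as their use in the
	 * mask suggests), an equivalent illustrative formulation is the
	 * classic power-of-two test:
	 *
	 *	u_int32_t qf = flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB);
	 *	if (qf & (qf - 1))
	 *		return (NULL);	// more than one bit set
	 */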

	ifq = pif->pif_ifq;
	ifp = PRIQIF_IFP(pif);

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(priq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, priq_cl_size);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;	/* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

	if (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & PRCF_ECN) {
			if (flags & PRCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & PRCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & PRCF_FLOWCTL) {
			if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & PRCF_CLEARDSCP) {
			if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 * events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);
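
		/*
		 * Worked example (editorial): pkttime approximates the
		 * time to transmit one MTU-sized packet, in nanoseconds:
		 * if_mtu * 10^9 / (bits-per-sec / 8).  For a 1500-byte
		 * MTU on a 1 Gbps link:
		 *
		 *	1500 * 10^9 / (10^9 / 8) = 12000 ns (12 usec)
		 *
		 * The ifbandwidth < 8 guard above avoids a zero divisor.
		 */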

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & PRCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & PRCF_SFB) {
			if (!(cl->cl_flags & PRCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & PRCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, PRCF_BITS);
	}

	return (cl);
}

int
priq_remove_queue(struct priq_if *pif, u_int32_t qid)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	return (priq_class_destroy(pif, cl));
}

static int
priq_class_destroy(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!qempty(&cl->cl_q))
		priq_purgeq(pif, cl, 0, NULL, NULL);

	VERIFY(cl->cl_pri < PRIQ_MAXPRI);
	VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));

	pif->pif_classes[cl->cl_pri] = NULL;
	if (pif->pif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (pif->pif_classes[pri] != NULL) {
				pif->pif_maxpri = pri;
				break;
			}
		if (pri < 0)
			pif->pif_maxpri = -1;
	}

	if (pif->pif_default == cl)
		pif->pif_default = NULL;

	if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri);
	}

	zfree(priq_cl_zone, cl);

	return (0);
}

int
priq_enqueue(struct priq_if *pif, struct priq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t pri;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_pif == pif);

	if (cl == NULL) {
		cl = priq_clh_to_clp(pif, t->pftag_qid);
		if (cl == NULL) {
			cl = pif->pif_default;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}
	pri = cl->cl_pri;
	VERIFY(pri < PRIQ_MAXPRI);

	len = m_pktlen(m);

	ret = priq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in priq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}
	IFCQ_INC_LEN(ifq);

	/* class is now active; indicate it as such */
	if (!pktsched_bit_tst(pri, &pif->pif_bitmap))
		pktsched_bit_set(pri, &pif->pif_bitmap);

	/* successfully queued. */
	return (ret);
}

/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 * from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 * CLASSQDQ_REMOVE must return the same packet if called immediately
 * after CLASSQDQ_POLL.
 */
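/*
 * Illustrative contract check (editorial sketch, not part of the API):
 *
 *	struct mbuf *peek, *m;
 *
 *	IFCQ_LOCK(ifq);
 *	peek = priq_dequeue(pif, CLASSQDQ_POLL);	// peek only
 *	m = priq_dequeue(pif, CLASSQDQ_REMOVE);		// actual dequeue
 *	VERIFY(peek == m);	// POLL previews what REMOVE returns next
 *	IFCQ_UNLOCK(ifq);
 */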
struct mbuf *
priq_dequeue(struct priq_if *pif, cqdq_op_t op)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	struct mbuf *m;
	u_int32_t pri, len;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (pif->pif_bitmap == 0) {
		/* no active class; nothing to dequeue */
		return (NULL);
	}
	VERIFY(!IFCQ_IS_EMPTY(ifq));

	pri = pktsched_fls(pif->pif_bitmap) - 1;	/* zero based */
	VERIFY(pri < PRIQ_MAXPRI);
	cl = pif->pif_classes[pri];
	VERIFY(cl != NULL && !qempty(&cl->cl_q));

	if (op == CLASSQDQ_POLL)
		return (priq_pollq(cl));

	m = priq_getq(cl);
	VERIFY(m != NULL);	/* qalg must be work conserving */
	len = m_pktlen(m);

	IFCQ_DEC_LEN(ifq);
	if (qempty(&cl->cl_q)) {
		cl->cl_period++;
		/* class is now inactive; indicate it as such */
		pktsched_bit_clr(pri, &pif->pif_bitmap);
	}
	PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len);
	IFCQ_XMIT_ADD(ifq, 1, len);

	return (m);
}

static inline int
priq_addq(struct priq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct priq_if *pif = cl->cl_pif;
	struct ifclassq *ifq = pif->pif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = PRIQIF_IFP(pif);

			VERIFY(cl->cl_flags & PRCF_LAZY);
			cl->cl_flags &= ~PRCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~PRCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    priq_style(pif), cl->cl_handle,
				    cl->cl_pri);
			} else if (pif->pif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, pif->pif_throttle };
				int err = priq_throttle(pif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					tr.level = IFNET_THROTTLE_OFF;
					(void) priq_throttle(pif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, t, 0);

	_addq(&cl->cl_q, m);

	return (0);
}

static inline struct mbuf *
priq_getq(struct priq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_pif->pif_ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_getq(cl->cl_rio, &cl->cl_q));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_getq(cl->cl_red, &cl->cl_q));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_getq(cl->cl_blue, &cl->cl_q));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_getq(cl->cl_sfb, &cl->cl_q));

	return (_getq(&cl->cl_q));
}

static inline struct mbuf *
priq_pollq(struct priq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_pif->pif_ifq);

	return (qhead(&cl->cl_q));
}

static void
priq_purgeq(struct priq_if *pif, struct priq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0) {
		VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));
		goto done;
	}

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (qempty(&cl->cl_q))
			pktsched_bit_clr(cl->cl_pri, &pif->pif_bitmap);

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(PRIQIF_IFP(pif)), priq_style(pif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}

static void
priq_updateq(struct priq_if *pif, struct priq_class *cl, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
	}

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_updateq(cl->cl_rio, ev));
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_updateq(cl->cl_red, ev));
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_updateq(cl->cl_blue, ev));
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_updateq(cl->cl_sfb, ev));
}

int
priq_get_class_stats(struct priq_if *pif, u_int32_t qid,
    struct priq_classstats *sp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}

/* convert a class handle to the corresponding class pointer */
static inline struct priq_class *
priq_clh_to_clp(struct priq_if *pif, u_int32_t chandle)
{
	struct priq_class *cl;
	int idx;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	for (idx = pif->pif_maxpri; idx >= 0; idx--)
		if ((cl = pif->pif_classes[idx]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);

	return (NULL);
}

static const char *
priq_style(struct priq_if *pif)
{
	return ((pif->pif_flags & PRIQIFF_ALTQ) ? "ALTQ_PRIQ" : "PRIQ");
}

/*
 * priq_enqueue_ifclassq is an enqueue function to be registered to
 * (*ifcq_enqueue) in struct ifclassq.
 */
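/*
 * Editorial note: the registration itself happens in
 * priq_setup_ifclassq() below, via:
 *
 *	ifclassq_attach(ifq, PKTSCHEDT_PRIQ, pif,
 *	    priq_enqueue_ifclassq, priq_dequeue_ifclassq, NULL,
 *	    priq_request_ifclassq);
 */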
static int
priq_enqueue_ifclassq(struct ifclassq *ifq, struct mbuf *m)
{
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!(m->m_flags & M_PKTHDR)) {
		/* should not happen */
		log(LOG_ERR, "%s: packet does not have pkthdr\n",
		    if_name(ifq->ifcq_ifp));
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (ENOBUFS);
	}

	i = MBUF_SCIDX(mbuf_get_service_class(m));
	VERIFY((u_int32_t)i < IFCQ_SC_MAX);

	return (priq_enqueue(ifq->ifcq_disc,
	    ifq->ifcq_disc_slots[i].cl, m, m_pftag(m)));
}

/*
 * priq_dequeue_ifclassq is a dequeue function to be registered to
 * (*ifcq_dequeue) in struct ifclassq.
 *
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 * from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 * CLASSQDQ_REMOVE must return the same packet if called immediately
 * after CLASSQDQ_POLL.
 */
static struct mbuf *
priq_dequeue_ifclassq(struct ifclassq *ifq, cqdq_op_t op)
{
	return (priq_dequeue(ifq->ifcq_disc, op));
}

static int
priq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg)
{
	struct priq_if *pif = (struct priq_if *)ifq->ifcq_disc;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	switch (req) {
	case CLASSQRQ_PURGE:
		priq_purge(pif);
		break;

	case CLASSQRQ_PURGE_SC:
		priq_purge_sc(pif, (cqrq_purge_sc_t *)arg);
		break;

	case CLASSQRQ_EVENT:
		priq_event(pif, (cqev_t)arg);
		break;

	case CLASSQRQ_THROTTLE:
		err = priq_throttle(pif, (cqrq_throttle_t *)arg);
		break;
	}
	return (err);
}
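
/*
 * Editorial sketch of driving the request interface above; the
 * cqrq_throttle_t initializer shape { set, level } follows its use in
 * priq_addq(), and the locking follows the assertions in this file.
 * Illustrative only:
 *
 *	static int
 *	example_throttle_bk_sys(struct ifclassq *ifq)
 *	{
 *		cqrq_throttle_t tr = { 1, IFNET_THROTTLE_OPPORTUNISTIC };
 *		int err;
 *
 *		IFCQ_LOCK(ifq);
 *		err = priq_request_ifclassq(ifq, CLASSQRQ_THROTTLE, &tr);
 *		IFCQ_UNLOCK(ifq);
 *
 *		return (err);
 *	}
 */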

int
priq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	struct priq_class *cl0, *cl1, *cl2, *cl3, *cl4;
	struct priq_class *cl5, *cl6, *cl7, *cl8, *cl9;
	struct priq_if *pif;
	u_int32_t maxlen = 0, qflags = 0;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	if (flags & PKTSCHEDF_QALG_RED)
		qflags |= PRCF_RED;
	if (flags & PKTSCHEDF_QALG_RIO)
		qflags |= PRCF_RIO;
	if (flags & PKTSCHEDF_QALG_BLUE)
		qflags |= PRCF_BLUE;
	if (flags & PKTSCHEDF_QALG_SFB)
		qflags |= PRCF_SFB;
	if (flags & PKTSCHEDF_QALG_ECN)
		qflags |= PRCF_ECN;
	if (flags & PKTSCHEDF_QALG_FLOWCTL)
		qflags |= PRCF_FLOWCTL;

	pif = priq_alloc(ifp, M_WAITOK, FALSE);
	if (pif == NULL)
		return (ENOMEM);

	if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
		maxlen = if_sndq_maxlen;

	if ((err = priq_add_queue(pif, 0, maxlen,
	    qflags | PRCF_LAZY, SCIDX_BK_SYS, &cl0)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 1, maxlen,
	    qflags | PRCF_LAZY, SCIDX_BK, &cl1)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 2, maxlen,
	    qflags | PRCF_DEFAULTCLASS, SCIDX_BE, &cl2)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 3, maxlen,
	    qflags | PRCF_LAZY, SCIDX_RD, &cl3)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 4, maxlen,
	    qflags | PRCF_LAZY, SCIDX_OAM, &cl4)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 5, maxlen,
	    qflags | PRCF_LAZY, SCIDX_AV, &cl5)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 6, maxlen,
	    qflags | PRCF_LAZY, SCIDX_RV, &cl6)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 7, maxlen,
	    qflags | PRCF_LAZY, SCIDX_VI, &cl7)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 8, maxlen,
	    qflags | PRCF_LAZY, SCIDX_VO, &cl8)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 9, maxlen,
	    qflags, SCIDX_CTL, &cl9)) != 0)
		goto cleanup;

	err = ifclassq_attach(ifq, PKTSCHEDT_PRIQ, pif,
	    priq_enqueue_ifclassq, priq_dequeue_ifclassq, NULL,
	    priq_request_ifclassq);

	/* cache these for faster lookup */
	if (err == 0) {
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK_SYS;
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;

		ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_BE].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_RD;
		ifq->ifcq_disc_slots[SCIDX_RD].cl = cl3;

		ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_OAM;
		ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl4;

		ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_AV;
		ifq->ifcq_disc_slots[SCIDX_AV].cl = cl5;

		ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_RV;
		ifq->ifcq_disc_slots[SCIDX_RV].cl = cl6;

		ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_VI].cl = cl7;

		ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_VO].cl = cl8;

		ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_CTL;
		ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl9;
	}

cleanup:
	if (err != 0)
		(void) priq_destroy_locked(pif);

	return (err);
}

int
priq_teardown_ifclassq(struct ifclassq *ifq)
{
	struct priq_if *pif = ifq->ifcq_disc;
	int i;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(pif != NULL && ifq->ifcq_type == PKTSCHEDT_PRIQ);

	(void) priq_destroy_locked(pif);

	ifq->ifcq_disc = NULL;
	for (i = 0; i < IFCQ_SC_MAX; i++) {
		ifq->ifcq_disc_slots[i].qid = 0;
		ifq->ifcq_disc_slots[i].cl = NULL;
	}

	return (ifclassq_detach(ifq));
}

int
priq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
	struct priq_if *pif = ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_PRIQ);

	if (slot >= IFCQ_SC_MAX)
		return (EINVAL);

	return (priq_get_class_stats(pif, ifq->ifcq_disc_slots[slot].qid,
	    &ifqs->ifqs_priq_stats));
}

static int
priq_throttle(struct priq_if *pif, cqrq_throttle_t *tr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	int err;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(!(pif->pif_flags & PRIQIFF_ALTQ));

	if (!tr->set) {
		tr->level = pif->pif_throttle;
		return (0);
	}

	if (tr->level == pif->pif_throttle)
		return (EALREADY);

	/* Current throttling levels only involve BK_SYS class */
	cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl;

	switch (tr->level) {
	case IFNET_THROTTLE_OFF:
		err = priq_resumeq(pif, cl);
		break;

	case IFNET_THROTTLE_OPPORTUNISTIC:
		err = priq_suspendq(pif, cl);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (err == 0 || err == ENXIO) {
		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s throttling level %sset %d->%d\n",
			    if_name(PRIQIF_IFP(pif)), priq_style(pif),
			    (err == 0) ? "" : "lazy ", pif->pif_throttle,
			    tr->level);
		}
		pif->pif_throttle = tr->level;
		if (err != 0)
			err = 0;
		else
			priq_purgeq(pif, cl, 0, NULL, NULL);
	} else {
		log(LOG_ERR, "%s: %s unable to set throttling level "
		    "%d->%d [error=%d]\n", if_name(PRIQIF_IFP(pif)),
		    priq_style(pif), pif->pif_throttle, tr->level, err);
	}

	return (err);
}

static int
priq_resumeq(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE);

	if (err == 0)
		qstate(&cl->cl_q) = QS_RUNNING;

	return (err);
}

static int
priq_suspendq(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb != NULL) {
			err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE);
		} else {
			VERIFY(cl->cl_flags & PRCF_LAZY);
			err = ENXIO;	/* delayed throttling */
		}
	}

	if (err == 0 || err == ENXIO)
		qstate(&cl->cl_q) = QS_SUSPENDED;

	return (err);
}
#endif /* PKTSCHED_PRIQ */