]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pktsched/pktsched_priq.c
xnu-3248.20.55.tar.gz
[apple/xnu.git] / bsd / net / pktsched / pktsched_priq.c
1 /*
2 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $OpenBSD: altq_priq.c,v 1.21 2007/09/13 20:40:02 chl Exp $ */
30 /* $KAME: altq_priq.c,v 1.1 2000/10/18 09:15:23 kjc Exp $ */
31
32 /*
33 * Copyright (C) 2000-2003
34 * Sony Computer Science Laboratories Inc. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 *
45 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58 /*
59 * priority queue
60 */
61
62 #if PKTSCHED_PRIQ
63
64 #include <sys/cdefs.h>
65 #include <sys/param.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/systm.h>
69 #include <sys/errno.h>
70 #include <sys/kernel.h>
71 #include <sys/syslog.h>
72
73 #include <kern/zalloc.h>
74
75 #include <net/if.h>
76 #include <net/net_osdep.h>
77
78 #include <net/pktsched/pktsched_priq.h>
79 #include <netinet/in.h>
80
81 /*
82 * function prototypes
83 */
84 static int priq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
85 static struct mbuf *priq_dequeue_ifclassq(struct ifclassq *, cqdq_op_t);
86 static int priq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
87 static int priq_clear_interface(struct priq_if *);
88 static struct priq_class *priq_class_create(struct priq_if *, int, u_int32_t,
89 int, u_int32_t);
90 static int priq_class_destroy(struct priq_if *, struct priq_class *);
91 static int priq_destroy_locked(struct priq_if *);
92 static inline int priq_addq(struct priq_class *, struct mbuf *,
93 struct pf_mtag *);
94 static inline struct mbuf *priq_getq(struct priq_class *);
95 static inline struct mbuf *priq_pollq(struct priq_class *);
96 static void priq_purgeq(struct priq_if *, struct priq_class *, u_int32_t,
97 u_int32_t *, u_int32_t *);
98 static void priq_purge_sc(struct priq_if *, cqrq_purge_sc_t *);
99 static void priq_updateq(struct priq_if *, struct priq_class *, cqev_t);
100 static int priq_throttle(struct priq_if *, cqrq_throttle_t *);
101 static int priq_resumeq(struct priq_if *, struct priq_class *);
102 static int priq_suspendq(struct priq_if *, struct priq_class *);
103 static int priq_stat_sc(struct priq_if *, cqrq_stat_sc_t *);
104 static inline struct priq_class *priq_clh_to_clp(struct priq_if *, u_int32_t);
105 static const char *priq_style(struct priq_if *);
106
107 #define PRIQ_ZONE_MAX 32 /* maximum elements in zone */
108 #define PRIQ_ZONE_NAME "pktsched_priq" /* zone name */
109
110 static unsigned int priq_size; /* size of zone element */
111 static struct zone *priq_zone; /* zone for priq */
112
113 #define PRIQ_CL_ZONE_MAX 32 /* maximum elements in zone */
114 #define PRIQ_CL_ZONE_NAME "pktsched_priq_cl" /* zone name */
115
116 static unsigned int priq_cl_size; /* size of zone element */
117 static struct zone *priq_cl_zone; /* zone for priq_class */
118
void
priq_init(void)
{
	/*
	 * One-time module initialization: carve out the two zalloc
	 * zones used by this scheduler, one for priq_if instances
	 * and one for priq_class instances.  Failure to create a
	 * zone is fatal at boot.
	 */
	priq_size = sizeof (struct priq_if);
	priq_zone = zinit(priq_size, PRIQ_ZONE_MAX * priq_size,
	    0, PRIQ_ZONE_NAME);
	if (priq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_zone, Z_EXPAND, TRUE);
	zone_change(priq_zone, Z_CALLERACCT, TRUE);

	priq_cl_size = sizeof (struct priq_class);
	priq_cl_zone = zinit(priq_cl_size, PRIQ_CL_ZONE_MAX * priq_cl_size,
	    0, PRIQ_CL_ZONE_NAME);
	if (priq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_cl_zone, Z_EXPAND, TRUE);
	zone_change(priq_cl_zone, Z_CALLERACCT, TRUE);
}
142
struct priq_if *
priq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
	struct priq_if *pif;

	/*
	 * Allocate and initialize a scheduler instance bound to the
	 * interface's send queue.  Returns NULL on allocation failure.
	 * zalloc_noblock() is used when the caller cannot sleep.
	 */
	pif = (how == M_WAITOK) ? zalloc(priq_zone) : zalloc_noblock(priq_zone);
	if (pif == NULL)
		return (NULL);

	bzero(pif, priq_size);
	pif->pif_maxpri = -1;		/* no classes configured yet */
	pif->pif_ifq = &ifp->if_snd;
	if (altq)
		pif->pif_flags |= PRIQIFF_ALTQ;	/* instance managed by PF_ALTQ */

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
		    if_name(ifp), priq_style(pif));
	}

	return (pif);
}
165
166 int
167 priq_destroy(struct priq_if *pif)
168 {
169 struct ifclassq *ifq = pif->pif_ifq;
170 int err;
171
172 IFCQ_LOCK(ifq);
173 err = priq_destroy_locked(pif);
174 IFCQ_UNLOCK(ifq);
175
176 return (err);
177 }
178
static int
priq_destroy_locked(struct priq_if *pif)
{
	/*
	 * Tear down the scheduler instance with the ifclassq lock held:
	 * destroy all classes (discarding any queued packets), then
	 * free the priq_if back to its zone.  Always returns 0.
	 */
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	(void) priq_clear_interface(pif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
	}

	zfree(priq_zone, pif);

	return (0);
}
195
196 /*
197 * bring the interface back to the initial state by discarding
198 * all the filters and classes.
199 */
200 static int
201 priq_clear_interface(struct priq_if *pif)
202 {
203 struct priq_class *cl;
204 int pri;
205
206 IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);
207
208 /* clear out the classes */
209 for (pri = 0; pri <= pif->pif_maxpri; pri++)
210 if ((cl = pif->pif_classes[pri]) != NULL)
211 priq_class_destroy(pif, cl);
212
213 return (0);
214 }
215
/* discard all the queued packets on the interface */
void
priq_purge(struct priq_if *pif)
{
	struct priq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* flush every non-empty class; flow 0 matches all packets */
	for (pri = 0; pri <= pif->pif_maxpri; pri++) {
		if ((cl = pif->pif_classes[pri]) != NULL && !qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
	}
#if !PF_ALTQ
	/*
	 * This assertion is safe to be made only when PF_ALTQ is not
	 * configured; otherwise, IFCQ_LEN represents the sum of the
	 * packets managed by ifcq_disc and altq_disc instances, which
	 * is possible when transitioning between the two.
	 */
	VERIFY(IFCQ_LEN(pif->pif_ifq) == 0);
#endif /* !PF_ALTQ */
}
239
static void
priq_purge_sc(struct priq_if *pif, cqrq_purge_sc_t *pr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * Purge the packets of flow pr->flow from either a single
	 * service class (pr->sc valid) or all of them (MBUF_SC_UNSPEC),
	 * reporting the totals back via pr->packets / pr->bytes.
	 */
	VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc));
	VERIFY(pr->flow != 0);

	if (pr->sc != MBUF_SC_UNSPEC) {
		/* purge only the class backing the given service class */
		i = MBUF_SCIDX(pr->sc);
		VERIFY(i < IFCQ_SC_MAX);

		priq_purgeq(pif, ifq->ifcq_disc_slots[i].cl,
		    pr->flow, &pr->packets, &pr->bytes);
	} else {
		u_int32_t cnt, len;

		pr->packets = 0;
		pr->bytes = 0;

		/* purge the flow from every class, accumulating counts */
		for (i = 0; i < IFCQ_SC_MAX; i++) {
			priq_purgeq(pif, ifq->ifcq_disc_slots[i].cl,
			    pr->flow, &cnt, &len);
			pr->packets += cnt;
			pr->bytes += len;
		}
	}
}
271
272 void
273 priq_event(struct priq_if *pif, cqev_t ev)
274 {
275 struct priq_class *cl;
276 int pri;
277
278 IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);
279
280 for (pri = 0; pri <= pif->pif_maxpri; pri++)
281 if ((cl = pif->pif_classes[pri]) != NULL)
282 priq_updateq(pif, cl, ev);
283 }
284
285 int
286 priq_add_queue(struct priq_if *pif, int priority, u_int32_t qlimit,
287 int flags, u_int32_t qid, struct priq_class **clp)
288 {
289 struct priq_class *cl;
290
291 IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);
292
293 /* check parameters */
294 if (priority >= PRIQ_MAXPRI)
295 return (EINVAL);
296 if (pif->pif_classes[priority] != NULL)
297 return (EBUSY);
298 if (priq_clh_to_clp(pif, qid) != NULL)
299 return (EBUSY);
300
301 cl = priq_class_create(pif, priority, qlimit, flags, qid);
302 if (cl == NULL)
303 return (ENOMEM);
304
305 if (clp != NULL)
306 *clp = cl;
307
308 return (0);
309 }
310
/*
 * Create (or reconfigure) the class at priority `pri' with queue limit
 * `qlimit', creation flags and handle `qid'.  If a class already exists
 * at that priority it is drained and its queue algorithm torn down, then
 * reconfigured in place.  Returns the class, or NULL on bad flags or
 * allocation failure.
 */
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* Sanitize flags unless internally configured */
	if (pif->pif_flags & PRIQIFF_ALTQ)
		flags &= PRCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & PRCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & PRCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & PRCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RED &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RIO &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_BLUE &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}

	ifq = pif->pif_ifq;
	ifp = PRIQIF_IFP(pif);

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
		/* dispose of the previous queue algorithm state, if any */
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(priq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, priq_cl_size);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	/* clamp the queue limit to the interface queue's limit */
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;	/* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

	/* set up the requested queue algorithm (at most one; see above) */
	if (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & PRCF_ECN) {
			if (flags & PRCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & PRCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & PRCF_FLOWCTL) {
			if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & PRCF_CLEARDSCP) {
			if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 * events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & PRCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & PRCF_SFB) {
			/*
			 * PRCF_LAZY defers SFB allocation until the first
			 * packet is enqueued (see priq_addq()).
			 */
			if (!(cl->cl_flags & PRCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & PRCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, PRCF_BITS);
	}

	return (cl);
}
488
489 int
490 priq_remove_queue(struct priq_if *pif, u_int32_t qid)
491 {
492 struct priq_class *cl;
493
494 IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);
495
496 if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
497 return (EINVAL);
498
499 return (priq_class_destroy(pif, cl));
500 }
501
static int
priq_class_destroy(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/* drain any queued packets first */
	if (!qempty(&cl->cl_q))
		priq_purgeq(pif, cl, 0, NULL, NULL);

	/* after the purge the class must no longer be marked active */
	VERIFY(cl->cl_pri < PRIQ_MAXPRI);
	VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));

	pif->pif_classes[cl->cl_pri] = NULL;
	/* if this was the highest priority class, find the next highest */
	if (pif->pif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (pif->pif_classes[pri] != NULL) {
				pif->pif_maxpri = pri;
				break;
			}
		if (pri < 0)
			pif->pif_maxpri = -1;	/* no classes left */
	}

	if (pif->pif_default == cl)
		pif->pif_default = NULL;

	/* release any queue algorithm state attached to the class */
	if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri);
	}

	zfree(priq_cl_zone, cl);

	return (0);
}
560
/*
 * Enqueue mbuf `m' on class `cl' (resolved from the packet tag, or the
 * default class, when cl is NULL).  Returns 0 on success, EQFULL when
 * the packet was queued but the caller should be flow-advised, or an
 * error when the packet was dropped (in which case it has been freed).
 */
int
priq_enqueue(struct priq_if *pif, struct priq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t pri;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_pif == pif);

	if (cl == NULL) {
#if PF_ALTQ
		cl = priq_clh_to_clp(pif, t->pftag_qid);
#else /* !PF_ALTQ */
		cl = priq_clh_to_clp(pif, 0);
#endif /* !PF_ALTQ */
		if (cl == NULL) {
			cl = pif->pif_default;
			if (cl == NULL) {
				/* nowhere to put the packet; drop it */
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}
	pri = cl->cl_pri;
	VERIFY(pri < PRIQ_MAXPRI);

	len = m_pktlen(m);

	ret = priq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in priq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			/* map the classq drop reason to an errno */
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}
	IFCQ_INC_LEN(ifq);
	IFCQ_INC_BYTES(ifq, len);

	/* class is now active; indicate it as such */
	if (!pktsched_bit_tst(pri, &pif->pif_bitmap))
		pktsched_bit_set(pri, &pif->pif_bitmap);

	/* successfully queued. */
	return (ret);
}
625
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
priq_dequeue(struct priq_if *pif, cqdq_op_t op)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	struct mbuf *m;
	u_int32_t pri, len;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (pif->pif_bitmap == 0) {
		/* no active class; nothing to dequeue */
		return (NULL);
	}
	VERIFY(!IFCQ_IS_EMPTY(ifq));

	/* pick the highest-priority active class (highest set bit) */
	pri = pktsched_fls(pif->pif_bitmap) - 1;	/* zero based */
	VERIFY(pri < PRIQ_MAXPRI);
	cl = pif->pif_classes[pri];
	VERIFY(cl != NULL && !qempty(&cl->cl_q));

	if (op == CLASSQDQ_POLL)
		return (priq_pollq(cl));

	m = priq_getq(cl);
	VERIFY(m != NULL);	/* qalg must be work conserving */
	len = m_pktlen(m);

	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, len);
	if (qempty(&cl->cl_q)) {
		cl->cl_period++;
		/* class is now inactive; indicate it as such */
		pktsched_bit_clr(pri, &pif->pif_bitmap);
	}
	PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len);
	IFCQ_XMIT_ADD(ifq, 1, len);

	return (m);
}
672
/*
 * Add mbuf `m' to the class queue, dispatching to the configured queue
 * algorithm (RIO/RED/BLUE/SFB) or falling through to plain droptail.
 * Returns 0 or a CLASSQEQ_* code from the queue algorithm; on a drop
 * the mbuf has already been freed.
 */
static inline int
priq_addq(struct priq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct priq_if *pif = cl->cl_pif;
	struct ifclassq *ifq = pif->pif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = PRIQIF_IFP(pif);

			/* deferred (lazy) SFB allocation on first packet */
			VERIFY(cl->cl_flags & PRCF_LAZY);
			cl->cl_flags &= ~PRCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~PRCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    priq_style(pif), cl->cl_handle,
				    cl->cl_pri);
			} else if (pif->pif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, pif->pif_throttle };
				int err = priq_throttle(pif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					/* couldn't apply it; turn it off */
					tr.level = IFNET_THROTTLE_OFF;
					(void) priq_throttle(pif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		/* droptail: queue is full, drop the incoming packet */
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&cl->cl_q, m);

	return (0);
}
747
/*
 * Dequeue the head packet from the class queue via the configured
 * queue algorithm, or via plain _getq() for droptail.
 */
static inline struct mbuf *
priq_getq(struct priq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_pif->pif_ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_getq(cl->cl_rio, &cl->cl_q));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_getq(cl->cl_red, &cl->cl_q));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_getq(cl->cl_blue, &cl->cl_q));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_getq(cl->cl_sfb, &cl->cl_q));

	return (_getq(&cl->cl_q));
}
773
/* peek at the head packet of the class queue without removing it */
static inline struct mbuf *
priq_pollq(struct priq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_pif->pif_ifq);

	return (qhead(&cl->cl_q));
}
781
/*
 * Discard packets belonging to `flow' (0 matches all) from the class
 * queue, updating drop counters and the interface queue length.  The
 * number of packets/bytes discarded is returned via the optional
 * `packets'/`bytes' pointers.
 */
static void
priq_purgeq(struct priq_if *pif, struct priq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0) {
		/* empty class cannot be marked active */
		VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));
		goto done;
	}

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		/* class drained completely; mark it inactive */
		if (qempty(&cl->cl_q))
			pktsched_bit_clr(cl->cl_pri, &pif->pif_bitmap);

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(PRIQIF_IFP(pif)), priq_style(pif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
845
/* forward an interface event to the class's queue algorithm, if any */
static void
priq_updateq(struct priq_if *pif, struct priq_class *cl, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
	}

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_updateq(cl->cl_rio, ev));
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_updateq(cl->cl_red, ev));
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_updateq(cl->cl_blue, ev));
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_updateq(cl->cl_sfb, ev));
}
872
/*
 * Fill `sp' with a snapshot of the statistics of the class whose handle
 * is `qid'; returns EINVAL if no such class exists.
 */
int
priq_get_class_stats(struct priq_if *pif, u_int32_t qid,
    struct priq_classstats *sp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);
	/* append the queue-algorithm-specific stats, if one is configured */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}
911
912 static int
913 priq_stat_sc(struct priq_if *pif, cqrq_stat_sc_t *sr)
914 {
915 struct ifclassq *ifq = pif->pif_ifq;
916 struct priq_class *cl;
917 u_int32_t i;
918
919 IFCQ_LOCK_ASSERT_HELD(ifq);
920
921 VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc));
922
923 i = MBUF_SCIDX(sr->sc);
924 VERIFY(i < IFCQ_SC_MAX);
925
926 cl = ifq->ifcq_disc_slots[i].cl;
927 sr->packets = qlen(&cl->cl_q);
928 sr->bytes = qsize(&cl->cl_q);
929
930 return (0);
931 }
932
933 /* convert a class handle to the corresponding class pointer */
934 static inline struct priq_class *
935 priq_clh_to_clp(struct priq_if *pif, u_int32_t chandle)
936 {
937 struct priq_class *cl;
938 int idx;
939
940 IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);
941
942 for (idx = pif->pif_maxpri; idx >= 0; idx--)
943 if ((cl = pif->pif_classes[idx]) != NULL &&
944 cl->cl_handle == chandle)
945 return (cl);
946
947 return (NULL);
948 }
949
950 static const char *
951 priq_style(struct priq_if *pif)
952 {
953 return ((pif->pif_flags & PRIQIFF_ALTQ) ? "ALTQ_PRIQ" : "PRIQ");
954 }
955
/*
 * priq_enqueue_ifclassq is an enqueue function to be registered to
 * (*ifcq_enqueue) in struct ifclassq.
 */
static int
priq_enqueue_ifclassq(struct ifclassq *ifq, struct mbuf *m)
{
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!(m->m_flags & M_PKTHDR)) {
		/* should not happen */
		log(LOG_ERR, "%s: packet does not have pkthdr\n",
		    if_name(ifq->ifcq_ifp));
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (ENOBUFS);
	}

	/* map the packet's service class to its cached class slot */
	i = MBUF_SCIDX(mbuf_get_service_class(m));
	VERIFY((u_int32_t)i < IFCQ_SC_MAX);

	return (priq_enqueue(ifq->ifcq_disc,
	    ifq->ifcq_disc_slots[i].cl, m, m_pftag(m)));
}
982
/*
 * priq_dequeue_ifclassq is a dequeue function to be registered to
 * (*ifcq_dequeue) in struct ifclass.
 *
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
static struct mbuf *
priq_dequeue_ifclassq(struct ifclassq *ifq, cqdq_op_t op)
{
	/* thin adapter: the scheduler state hangs off ifcq_disc */
	return (priq_dequeue(ifq->ifcq_disc, op));
}
997
/*
 * priq_request_ifclassq dispatches control requests from the ifclassq
 * layer to the corresponding scheduler operation.  Requests not listed
 * below fall through and return 0 (treated as a no-op).
 */
static int
priq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg)
{
	struct priq_if *pif = (struct priq_if *)ifq->ifcq_disc;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	switch (req) {
	case CLASSQRQ_PURGE:
		priq_purge(pif);
		break;

	case CLASSQRQ_PURGE_SC:
		priq_purge_sc(pif, (cqrq_purge_sc_t *)arg);
		break;

	case CLASSQRQ_EVENT:
		priq_event(pif, (cqev_t)arg);
		break;

	case CLASSQRQ_THROTTLE:
		err = priq_throttle(pif, (cqrq_throttle_t *)arg);
		break;

	case CLASSQRQ_STAT_SC:
		err = priq_stat_sc(pif, (cqrq_stat_sc_t *)arg);
		break;
	}
	return (err);
}
1029
1030 int
1031 priq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
1032 {
1033 struct ifnet *ifp = ifq->ifcq_ifp;
1034 struct priq_class *cl0, *cl1, *cl2, *cl3, *cl4;
1035 struct priq_class *cl5, *cl6, *cl7, *cl8, *cl9;
1036 struct priq_if *pif;
1037 u_int32_t maxlen = 0, qflags = 0;
1038 int err = 0;
1039
1040 IFCQ_LOCK_ASSERT_HELD(ifq);
1041 VERIFY(ifq->ifcq_disc == NULL);
1042 VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
1043
1044 if (flags & PKTSCHEDF_QALG_RED)
1045 qflags |= PRCF_RED;
1046 if (flags & PKTSCHEDF_QALG_RIO)
1047 qflags |= PRCF_RIO;
1048 if (flags & PKTSCHEDF_QALG_BLUE)
1049 qflags |= PRCF_BLUE;
1050 if (flags & PKTSCHEDF_QALG_SFB)
1051 qflags |= PRCF_SFB;
1052 if (flags & PKTSCHEDF_QALG_ECN)
1053 qflags |= PRCF_ECN;
1054 if (flags & PKTSCHEDF_QALG_FLOWCTL)
1055 qflags |= PRCF_FLOWCTL;
1056
1057 pif = priq_alloc(ifp, M_WAITOK, FALSE);
1058 if (pif == NULL)
1059 return (ENOMEM);
1060
1061 if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
1062 maxlen = if_sndq_maxlen;
1063
1064 if ((err = priq_add_queue(pif, 0, maxlen,
1065 qflags | PRCF_LAZY, SCIDX_BK_SYS, &cl0)) != 0)
1066 goto cleanup;
1067
1068 if ((err = priq_add_queue(pif, 1, maxlen,
1069 qflags | PRCF_LAZY, SCIDX_BK, &cl1)) != 0)
1070 goto cleanup;
1071
1072 if ((err = priq_add_queue(pif, 2, maxlen,
1073 qflags | PRCF_DEFAULTCLASS, SCIDX_BE, &cl2)) != 0)
1074 goto cleanup;
1075
1076 if ((err = priq_add_queue(pif, 3, maxlen,
1077 qflags | PRCF_LAZY, SCIDX_RD, &cl3)) != 0)
1078 goto cleanup;
1079
1080 if ((err = priq_add_queue(pif, 4, maxlen,
1081 qflags | PRCF_LAZY, SCIDX_OAM, &cl4)) != 0)
1082 goto cleanup;
1083
1084 if ((err = priq_add_queue(pif, 5, maxlen,
1085 qflags | PRCF_LAZY, SCIDX_AV, &cl5)) != 0)
1086 goto cleanup;
1087
1088 if ((err = priq_add_queue(pif, 6, maxlen,
1089 qflags | PRCF_LAZY, SCIDX_RV, &cl6)) != 0)
1090 goto cleanup;
1091
1092 if ((err = priq_add_queue(pif, 7, maxlen,
1093 qflags | PRCF_LAZY, SCIDX_VI, &cl7)) != 0)
1094 goto cleanup;
1095
1096 if ((err = priq_add_queue(pif, 8, maxlen,
1097 qflags | PRCF_LAZY, SCIDX_VO, &cl8)) != 0)
1098 goto cleanup;
1099
1100 if ((err = priq_add_queue(pif, 9, maxlen,
1101 qflags, SCIDX_CTL, &cl9)) != 0)
1102 goto cleanup;
1103
1104 err = ifclassq_attach(ifq, PKTSCHEDT_PRIQ, pif,
1105 priq_enqueue_ifclassq, priq_dequeue_ifclassq, NULL,
1106 priq_request_ifclassq);
1107
1108 /* cache these for faster lookup */
1109 if (err == 0) {
1110 ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK_SYS;
1111 ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;
1112
1113 ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
1114 ifq->ifcq_disc_slots[SCIDX_BK].cl = cl1;
1115
1116 ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
1117 ifq->ifcq_disc_slots[SCIDX_BE].cl = cl2;
1118
1119 ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_RD;
1120 ifq->ifcq_disc_slots[SCIDX_RD].cl = cl3;
1121
1122 ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_OAM;
1123 ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl4;
1124
1125 ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_AV;
1126 ifq->ifcq_disc_slots[SCIDX_AV].cl = cl5;
1127
1128 ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_RV;
1129 ifq->ifcq_disc_slots[SCIDX_RV].cl = cl6;
1130
1131 ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
1132 ifq->ifcq_disc_slots[SCIDX_VI].cl = cl7;
1133
1134 ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
1135 ifq->ifcq_disc_slots[SCIDX_VO].cl = cl8;
1136
1137 ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_CTL;
1138 ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl9;
1139 }
1140
1141 cleanup:
1142 if (err != 0)
1143 (void) priq_destroy_locked(pif);
1144
1145 return (err);
1146 }
1147
1148 int
1149 priq_teardown_ifclassq(struct ifclassq *ifq)
1150 {
1151 struct priq_if *pif = ifq->ifcq_disc;
1152 int i;
1153
1154 IFCQ_LOCK_ASSERT_HELD(ifq);
1155 VERIFY(pif != NULL && ifq->ifcq_type == PKTSCHEDT_PRIQ);
1156
1157 (void) priq_destroy_locked(pif);
1158
1159 ifq->ifcq_disc = NULL;
1160 for (i = 0; i < IFCQ_SC_MAX; i++) {
1161 ifq->ifcq_disc_slots[i].qid = 0;
1162 ifq->ifcq_disc_slots[i].cl = NULL;
1163 }
1164
1165 return (ifclassq_detach(ifq));
1166 }
1167
1168 int
1169 priq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
1170 struct if_ifclassq_stats *ifqs)
1171 {
1172 struct priq_if *pif = ifq->ifcq_disc;
1173
1174 IFCQ_LOCK_ASSERT_HELD(ifq);
1175 VERIFY(ifq->ifcq_type == PKTSCHEDT_PRIQ);
1176
1177 if (slot >= IFCQ_SC_MAX)
1178 return (EINVAL);
1179
1180 return (priq_get_class_stats(pif, ifq->ifcq_disc_slots[slot].qid,
1181 &ifqs->ifqs_priq_stats));
1182 }
1183
1184 static int
1185 priq_throttle(struct priq_if *pif, cqrq_throttle_t *tr)
1186 {
1187 struct ifclassq *ifq = pif->pif_ifq;
1188 struct priq_class *cl;
1189 int err = 0;
1190
1191 IFCQ_LOCK_ASSERT_HELD(ifq);
1192 VERIFY(!(pif->pif_flags & PRIQIFF_ALTQ));
1193
1194 if (!tr->set) {
1195 tr->level = pif->pif_throttle;
1196 return (0);
1197 }
1198
1199 if (tr->level == pif->pif_throttle)
1200 return (EALREADY);
1201
1202 /* Current throttling levels only involve BK_SYS class */
1203 cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl;
1204
1205 switch (tr->level) {
1206 case IFNET_THROTTLE_OFF:
1207 err = priq_resumeq(pif, cl);
1208 break;
1209
1210 case IFNET_THROTTLE_OPPORTUNISTIC:
1211 err = priq_suspendq(pif, cl);
1212 break;
1213
1214 default:
1215 VERIFY(0);
1216 /* NOTREACHED */
1217 }
1218
1219 if (err == 0 || err == ENXIO) {
1220 if (pktsched_verbose) {
1221 log(LOG_DEBUG, "%s: %s throttling level %sset %d->%d\n",
1222 if_name(PRIQIF_IFP(pif)), priq_style(pif),
1223 (err == 0) ? "" : "lazy ", pif->pif_throttle,
1224 tr->level);
1225 }
1226 pif->pif_throttle = tr->level;
1227 if (err != 0)
1228 err = 0;
1229 else
1230 priq_purgeq(pif, cl, 0, NULL, NULL);
1231 } else {
1232 log(LOG_ERR, "%s: %s unable to set throttling level "
1233 "%d->%d [error=%d]\n", if_name(PRIQIF_IFP(pif)),
1234 priq_style(pif), pif->pif_throttle, tr->level, err);
1235 }
1236
1237 return (err);
1238 }
1239
/*
 * Resume (unthrottle) a class queue.
 *
 * Dispatches to the active AQM discipline's suspend routine with the
 * suspend flag FALSE, i.e. "resume".  On success the queue state is
 * moved back to QS_RUNNING.  Caller must hold the ifclassq lock.
 *
 * Returns 0 on success, otherwise the error from the AQM routine.
 */
static int
priq_resumeq(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * The #if arms below form a single if/else-if chain: each
	 * conditionally compiled test ends in a dangling "else" that
	 * binds to the next compiled-in test, with the SFB check as
	 * the final arm.
	 */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_BLUE */
	/* SFB may be lazily allocated; nothing to resume if absent. */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE);

	if (err == 0)
		qstate(&cl->cl_q) = QS_RUNNING;

	return (err);
}
1271
/*
 * Suspend (throttle) a class queue.
 *
 * Dispatches to the active AQM discipline's suspend routine with the
 * suspend flag TRUE.  For an SFB class whose queue has not yet been
 * allocated (PRCF_LAZY), returns ENXIO to signal delayed throttling;
 * the caller treats that as a deferred success.  On success or ENXIO
 * the queue state becomes QS_SUSPENDED.  Caller must hold the
 * ifclassq lock.
 *
 * Returns 0 on success, ENXIO for delayed throttling, otherwise the
 * error from the AQM routine.
 */
static int
priq_suspendq(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * The #if arms below form a single if/else-if chain: each
	 * conditionally compiled test ends in a dangling "else" that
	 * binds to the next compiled-in test, with the SFB check as
	 * the final arm.
	 */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb != NULL) {
			err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE);
		} else {
			/* SFB not yet allocated: only legal if lazy. */
			VERIFY(cl->cl_flags & PRCF_LAZY);
			err = ENXIO;	/* delayed throttling */
		}
	}

	if (err == 0 || err == ENXIO)
		qstate(&cl->cl_q) = QS_SUSPENDED;

	return (err);
}
1309 #endif /* PKTSCHED_PRIQ */