/*
 * Copyright (c) 2011-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * traffic class queue
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_tcq.h>
#include <netinet/in.h>

/*
 * function prototypes
 */
static int tcq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *tcq_dequeue_tc_ifclassq(struct ifclassq *,
    mbuf_svc_class_t, cqdq_op_t);
static int tcq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
static int tcq_clear_interface(struct tcq_if *);
static struct tcq_class *tcq_class_create(struct tcq_if *, int, u_int32_t,
    int, u_int32_t);
static int tcq_class_destroy(struct tcq_if *, struct tcq_class *);
static int tcq_destroy_locked(struct tcq_if *);
static inline int tcq_addq(struct tcq_class *, struct mbuf *,
    struct pf_mtag *);
static inline struct mbuf *tcq_getq(struct tcq_class *);
static inline struct mbuf *tcq_pollq(struct tcq_class *);
static void tcq_purgeq(struct tcq_if *, struct tcq_class *, u_int32_t,
    u_int32_t *, u_int32_t *);
static void tcq_purge_sc(struct tcq_if *, cqrq_purge_sc_t *);
static void tcq_updateq(struct tcq_if *, struct tcq_class *, cqev_t);
static int tcq_throttle(struct tcq_if *, cqrq_throttle_t *);
static int tcq_resumeq(struct tcq_if *, struct tcq_class *);
static int tcq_suspendq(struct tcq_if *, struct tcq_class *);
static int tcq_stat_sc(struct tcq_if *, cqrq_stat_sc_t *);
static struct mbuf *tcq_dequeue_cl(struct tcq_if *, struct tcq_class *,
    mbuf_svc_class_t, cqdq_op_t);
static inline struct tcq_class *tcq_clh_to_clp(struct tcq_if *, u_int32_t);
static const char *tcq_style(struct tcq_if *);

#define TCQ_ZONE_MAX    32              /* maximum elements in zone */
#define TCQ_ZONE_NAME   "pktsched_tcq"  /* zone name */

static unsigned int tcq_size;           /* size of zone element */
static struct zone *tcq_zone;           /* zone for tcq */

#define TCQ_CL_ZONE_MAX 32              /* maximum elements in zone */
#define TCQ_CL_ZONE_NAME "pktsched_tcq_cl" /* zone name */

static unsigned int tcq_cl_size;        /* size of zone element */
static struct zone *tcq_cl_zone;        /* zone for tcq_class */
void
tcq_init(void)
{
        tcq_size = sizeof (struct tcq_if);
        tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size,
            0, TCQ_ZONE_NAME);
        if (tcq_zone == NULL) {
                panic("%s: failed allocating %s", __func__, TCQ_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(tcq_zone, Z_EXPAND, TRUE);
        zone_change(tcq_zone, Z_CALLERACCT, TRUE);

        tcq_cl_size = sizeof (struct tcq_class);
        tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size,
            0, TCQ_CL_ZONE_NAME);
        if (tcq_cl_zone == NULL) {
                panic("%s: failed allocating %s", __func__, TCQ_CL_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(tcq_cl_zone, Z_EXPAND, TRUE);
        zone_change(tcq_cl_zone, Z_CALLERACCT, TRUE);
}

struct tcq_if *
tcq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
        struct tcq_if *tif;

        tif = (how == M_WAITOK) ? zalloc(tcq_zone) : zalloc_noblock(tcq_zone);
        if (tif == NULL)
                return (NULL);

        bzero(tif, tcq_size);
        tif->tif_maxpri = -1;
        tif->tif_ifq = &ifp->if_snd;
        if (altq)
                tif->tif_flags |= TCQIFF_ALTQ;

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s scheduler allocated\n",
                    if_name(ifp), tcq_style(tif));
        }

        return (tif);
}

int
tcq_destroy(struct tcq_if *tif)
{
        struct ifclassq *ifq = tif->tif_ifq;
        int err;

        IFCQ_LOCK(ifq);
        err = tcq_destroy_locked(tif);
        IFCQ_UNLOCK(ifq);

        return (err);
}

static int
tcq_destroy_locked(struct tcq_if *tif)
{
        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        (void) tcq_clear_interface(tif);

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif));
        }

        zfree(tcq_zone, tif);

        return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
tcq_clear_interface(struct tcq_if *tif)
{
        struct tcq_class *cl;
        int pri;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        /* clear out the classes */
        for (pri = 0; pri <= tif->tif_maxpri; pri++)
                if ((cl = tif->tif_classes[pri]) != NULL)
                        tcq_class_destroy(tif, cl);

        return (0);
}

/* discard all the queued packets on the interface */
void
tcq_purge(struct tcq_if *tif)
{
        struct tcq_class *cl;
        int pri;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        for (pri = 0; pri <= tif->tif_maxpri; pri++) {
                if ((cl = tif->tif_classes[pri]) != NULL && !qempty(&cl->cl_q))
                        tcq_purgeq(tif, cl, 0, NULL, NULL);
        }
#if !PF_ALTQ
        /*
         * This assertion is safe to be made only when PF_ALTQ is not
         * configured; otherwise, IFCQ_LEN represents the sum of the
         * packets managed by ifcq_disc and altq_disc instances, which
         * is possible when transitioning between the two.
         */
        VERIFY(IFCQ_LEN(tif->tif_ifq) == 0);
#endif /* !PF_ALTQ */
}

static void
tcq_purge_sc(struct tcq_if *tif, cqrq_purge_sc_t *pr)
{
        struct ifclassq *ifq = tif->tif_ifq;
        u_int32_t i;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc));
        VERIFY(pr->flow != 0);

        if (pr->sc != MBUF_SC_UNSPEC) {
                i = MBUF_SCIDX(pr->sc);
                VERIFY(i < IFCQ_SC_MAX);

                tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl,
                    pr->flow, &pr->packets, &pr->bytes);
        } else {
                u_int32_t cnt, len;

                pr->packets = 0;
                pr->bytes = 0;

                for (i = 0; i < IFCQ_SC_MAX; i++) {
                        tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl,
                            pr->flow, &cnt, &len);
                        pr->packets += cnt;
                        pr->bytes += len;
                }
        }
}

void
tcq_event(struct tcq_if *tif, cqev_t ev)
{
        struct tcq_class *cl;
        int pri;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        for (pri = 0; pri <= tif->tif_maxpri; pri++)
                if ((cl = tif->tif_classes[pri]) != NULL)
                        tcq_updateq(tif, cl, ev);
}

int
tcq_add_queue(struct tcq_if *tif, int priority, u_int32_t qlimit,
    int flags, u_int32_t qid, struct tcq_class **clp)
{
        struct tcq_class *cl;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        /* check parameters */
        if (priority >= TCQ_MAXPRI)
                return (EINVAL);
        if (tif->tif_classes[priority] != NULL)
                return (EBUSY);
        if (tcq_clh_to_clp(tif, qid) != NULL)
                return (EBUSY);

        cl = tcq_class_create(tif, priority, qlimit, flags, qid);
        if (cl == NULL)
                return (ENOMEM);

        if (clp != NULL)
                *clp = cl;

        return (0);
}
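
/*
 * Create (or reinitialize) the class at the given priority: set up its
 * packet queue with the requested limit and, if one of the RED/RIO/BLUE/SFB
 * flags is set, attach the corresponding queueing algorithm state.
 */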
static struct tcq_class *
tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
        struct ifnet *ifp;
        struct ifclassq *ifq;
        struct tcq_class *cl;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        /* Sanitize flags unless internally configured */
        if (tif->tif_flags & TCQIFF_ALTQ)
                flags &= TQCF_USERFLAGS;

#if !CLASSQ_RED
        if (flags & TQCF_RED) {
                log(LOG_ERR, "%s: %s RED not available!\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif));
                return (NULL);
        }
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
        if (flags & TQCF_RIO) {
                log(LOG_ERR, "%s: %s RIO not available!\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif));
                return (NULL);
        }
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
        if (flags & TQCF_BLUE) {
                log(LOG_ERR, "%s: %s BLUE not available!\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif));
                return (NULL);
        }
#endif /* !CLASSQ_BLUE */

        /* These are mutually exclusive */
        if ((flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) &&
            (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_RED &&
            (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_RIO &&
            (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_BLUE &&
            (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_SFB) {
                log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif));
                return (NULL);
        }

        ifq = tif->tif_ifq;
        ifp = TCQIF_IFP(tif);

        if ((cl = tif->tif_classes[pri]) != NULL) {
                /* modify the class instead of creating a new one */
                if (!qempty(&cl->cl_q))
                        tcq_purgeq(tif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
                if (q_is_rio(&cl->cl_q))
                        rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
                if (q_is_red(&cl->cl_q))
                        red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
                if (q_is_blue(&cl->cl_q))
                        blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
                if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                        sfb_destroy(cl->cl_sfb);
                cl->cl_qalg.ptr = NULL;
                qtype(&cl->cl_q) = Q_DROPTAIL;
                qstate(&cl->cl_q) = QS_RUNNING;
        } else {
                cl = zalloc(tcq_cl_zone);
                if (cl == NULL)
                        return (NULL);

                bzero(cl, tcq_cl_size);
        }

        tif->tif_classes[pri] = cl;
        if (flags & TQCF_DEFAULTCLASS)
                tif->tif_default = cl;
        if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
                qlimit = IFCQ_MAXLEN(ifq);
                if (qlimit == 0)
                        qlimit = DEFAULT_QLIMIT;  /* use default */
        }
        _qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
        cl->cl_flags = flags;
        cl->cl_pri = pri;
        if (pri > tif->tif_maxpri)
                tif->tif_maxpri = pri;
        cl->cl_tif = tif;
        cl->cl_handle = qid;

        if (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
                u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
                int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

                cl->cl_qflags = 0;
                if (flags & TQCF_ECN) {
                        if (flags & TQCF_BLUE)
                                cl->cl_qflags |= BLUEF_ECN;
                        else if (flags & TQCF_SFB)
                                cl->cl_qflags |= SFBF_ECN;
                        else if (flags & TQCF_RED)
                                cl->cl_qflags |= REDF_ECN;
                        else if (flags & TQCF_RIO)
                                cl->cl_qflags |= RIOF_ECN;
                }
                if (flags & TQCF_FLOWCTL) {
                        if (flags & TQCF_SFB)
                                cl->cl_qflags |= SFBF_FLOWCTL;
                }
                if (flags & TQCF_DELAYBASED) {
                        if (flags & TQCF_SFB)
                                cl->cl_qflags |= SFBF_DELAYBASED;
                }
                if (flags & TQCF_CLEARDSCP) {
                        if (flags & TQCF_RIO)
                                cl->cl_qflags |= RIOF_CLEARDSCP;
                }
#if CLASSQ_RED || CLASSQ_RIO
                /*
                 * XXX: RED & RIO should be watching link speed and MTU
                 * events and recompute pkttime accordingly.
                 */
                if (ifbandwidth < 8)
                        pkttime = 1000 * 1000 * 1000; /* 1 sec */
                else
                        pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
                            (ifbandwidth / 8);

                /* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
                if (flags & TQCF_RED) {
                        cl->cl_red = red_alloc(ifp, 0, 0,
                            qlimit(&cl->cl_q) * 10/100,
                            qlimit(&cl->cl_q) * 30/100,
                            cl->cl_qflags, pkttime);
                        if (cl->cl_red != NULL)
                                qtype(&cl->cl_q) = Q_RED;
                }
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
                if (flags & TQCF_RIO) {
                        cl->cl_rio =
                            rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
                        if (cl->cl_rio != NULL)
                                qtype(&cl->cl_q) = Q_RIO;
                }
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
                if (flags & TQCF_BLUE) {
                        cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
                        if (cl->cl_blue != NULL)
                                qtype(&cl->cl_q) = Q_BLUE;
                }
#endif /* CLASSQ_BLUE */
                if (flags & TQCF_SFB) {
                        if (!(cl->cl_flags & TQCF_LAZY))
                                cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
                                    qlimit(&cl->cl_q), cl->cl_qflags);
                        if (cl->cl_sfb != NULL || (cl->cl_flags & TQCF_LAZY))
                                qtype(&cl->cl_q) = Q_SFB;
                }
        }

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
                    "flags=%b\n", if_name(ifp), tcq_style(tif),
                    cl->cl_handle, cl->cl_pri, qlimit, flags, TQCF_BITS);
        }

        return (cl);
}

int
tcq_remove_queue(struct tcq_if *tif, u_int32_t qid)
{
        struct tcq_class *cl;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        if ((cl = tcq_clh_to_clp(tif, qid)) == NULL)
                return (EINVAL);

        return (tcq_class_destroy(tif, cl));
}

static int
tcq_class_destroy(struct tcq_if *tif, struct tcq_class *cl)
{
        struct ifclassq *ifq = tif->tif_ifq;
        int pri;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        if (!qempty(&cl->cl_q))
                tcq_purgeq(tif, cl, 0, NULL, NULL);

        tif->tif_classes[cl->cl_pri] = NULL;
        if (tif->tif_maxpri == cl->cl_pri) {
                for (pri = cl->cl_pri; pri >= 0; pri--)
                        if (tif->tif_classes[pri] != NULL) {
                                tif->tif_maxpri = pri;
                                break;
                        }
                if (pri < 0)
                        tif->tif_maxpri = -1;
        }

        if (tif->tif_default == cl)
                tif->tif_default = NULL;

        if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
                if (q_is_rio(&cl->cl_q))
                        rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
                if (q_is_red(&cl->cl_q))
                        red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
                if (q_is_blue(&cl->cl_q))
                        blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
                if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                        sfb_destroy(cl->cl_sfb);
                cl->cl_qalg.ptr = NULL;
                qtype(&cl->cl_q) = Q_DROPTAIL;
                qstate(&cl->cl_q) = QS_RUNNING;
        }

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif),
                    cl->cl_handle, cl->cl_pri);
        }

        zfree(tcq_cl_zone, cl);
        return (0);
}
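
/*
 * Enqueue a packet onto the given class (or, when cl is NULL, the class
 * resolved from the packet tag or the default class).  The mbuf is freed
 * on failure; on success the interface queue length/byte counters are
 * updated and EQFULL may be returned as advisory flow-control feedback.
 */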
int
tcq_enqueue(struct tcq_if *tif, struct tcq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
        struct ifclassq *ifq = tif->tif_ifq;
        int len, ret;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(cl == NULL || cl->cl_tif == tif);

        if (cl == NULL) {
#if PF_ALTQ
                cl = tcq_clh_to_clp(tif, t->pftag_qid);
#else /* !PF_ALTQ */
                cl = tcq_clh_to_clp(tif, 0);
#endif /* !PF_ALTQ */
                if (cl == NULL) {
                        cl = tif->tif_default;
                        if (cl == NULL) {
                                IFCQ_CONVERT_LOCK(ifq);
                                m_freem(m);
                                return (ENOBUFS);
                        }
                }
        }

        len = m_pktlen(m);

        ret = tcq_addq(cl, m, t);
        if (ret != 0) {
                if (ret == CLASSQEQ_SUCCESS_FC) {
                        /* packet enqueued, return advisory feedback */
                        ret = EQFULL;
                } else {
                        VERIFY(ret == CLASSQEQ_DROPPED ||
                            ret == CLASSQEQ_DROPPED_FC ||
                            ret == CLASSQEQ_DROPPED_SP);
                        /* packet has been freed in tcq_addq */
                        PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
                        IFCQ_DROP_ADD(ifq, 1, len);
                        switch (ret) {
                        case CLASSQEQ_DROPPED:
                                return (ENOBUFS);
                        case CLASSQEQ_DROPPED_FC:
                                return (EQFULL);
                        case CLASSQEQ_DROPPED_SP:
                                return (EQSUSPENDED);
                        }
                        /* NOT REACHED */
                }
        }
        IFCQ_INC_LEN(ifq);
        IFCQ_INC_BYTES(ifq, len);

        /* successfully queued. */
        return (ret);
}

/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *      from the queue. CLASSQDQ_REMOVE is a normal dequeue operation.
 *      CLASSQDQ_REMOVE must return the same packet if called immediately
 *      after CLASSQDQ_POLL.
 */
struct mbuf *
tcq_dequeue_tc(struct tcq_if *tif, mbuf_svc_class_t sc, cqdq_op_t op)
{
        return (tcq_dequeue_cl(tif, NULL, sc, op));
}
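
/*
 * Dequeue (or, for CLASSQDQ_POLL, peek at) a packet from the class serving
 * the given service class; queue length and transmit counters are updated
 * only when a packet is actually removed.
 */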
static struct mbuf *
tcq_dequeue_cl(struct tcq_if *tif, struct tcq_class *cl,
    mbuf_svc_class_t sc, cqdq_op_t op)
{
        struct ifclassq *ifq = tif->tif_ifq;
        struct mbuf *m;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        if (cl == NULL) {
                cl = tcq_clh_to_clp(tif, MBUF_SCIDX(sc));
                if (cl == NULL)
                        return (NULL);
        }

        if (qempty(&cl->cl_q))
                return (NULL);

        VERIFY(!IFCQ_IS_EMPTY(ifq));

        if (op == CLASSQDQ_POLL)
                return (tcq_pollq(cl));

        m = tcq_getq(cl);
        if (m != NULL) {
                IFCQ_DEC_LEN(ifq);
                IFCQ_DEC_BYTES(ifq, m_pktlen(m));
                if (qempty(&cl->cl_q))
                        cl->cl_period++;
                PKTCNTR_ADD(&cl->cl_xmitcnt, 1, m_pktlen(m));
                IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
        }
        return (m);
}
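
/*
 * Add a packet to the class queue, handing it to whichever queueing
 * algorithm (RIO/RED/BLUE/SFB) is active on the queue; SFB state may be
 * allocated lazily here on first use, falling back to plain tail-drop
 * if that allocation fails.
 */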
static inline int
tcq_addq(struct tcq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
        struct tcq_if *tif = cl->cl_tif;
        struct ifclassq *ifq = tif->tif_ifq;

        IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                return (red_addq(cl->cl_red, &cl->cl_q, m, t));
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
        else
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q)) {
                if (cl->cl_sfb == NULL) {
                        struct ifnet *ifp = TCQIF_IFP(tif);

                        VERIFY(cl->cl_flags & TQCF_LAZY);
                        cl->cl_flags &= ~TQCF_LAZY;
                        IFCQ_CONVERT_LOCK(ifq);

                        cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
                            qlimit(&cl->cl_q), cl->cl_qflags);
                        if (cl->cl_sfb == NULL) {
                                /* fall back to droptail */
                                qtype(&cl->cl_q) = Q_DROPTAIL;
                                cl->cl_flags &= ~TQCF_SFB;
                                cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

                                log(LOG_ERR, "%s: %s SFB lazy allocation "
                                    "failed for qid=%d pri=%d, falling back "
                                    "to DROPTAIL\n", if_name(ifp),
                                    tcq_style(tif), cl->cl_handle,
                                    cl->cl_pri);
                        } else if (tif->tif_throttle != IFNET_THROTTLE_OFF) {
                                /* if there's pending throttling, set it */
                                cqrq_throttle_t tr = { 1, tif->tif_throttle };
                                int err = tcq_throttle(tif, &tr);

                                if (err == EALREADY)
                                        err = 0;
                                if (err != 0) {
                                        tr.level = IFNET_THROTTLE_OFF;
                                        (void) tcq_throttle(tif, &tr);
                                }
                        }
                }
                if (cl->cl_sfb != NULL)
                        return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
        } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
                IFCQ_CONVERT_LOCK(ifq);
                m_freem(m);
                return (CLASSQEQ_DROPPED);
        }

#if PF_ECN
        if (cl->cl_flags & TQCF_CLEARDSCP)
                write_dsfield(m, t, 0);
#endif /* PF_ECN */

        _addq(&cl->cl_q, m);

        return (0);
}

static inline struct mbuf *
tcq_getq(struct tcq_class *cl)
{
        IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq);

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                return (rio_getq(cl->cl_rio, &cl->cl_q));
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                return (red_getq(cl->cl_red, &cl->cl_q));
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                return (blue_getq(cl->cl_blue, &cl->cl_q));
        else
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                return (sfb_getq(cl->cl_sfb, &cl->cl_q));

        return (_getq(&cl->cl_q));
}

static inline struct mbuf *
tcq_pollq(struct tcq_class *cl)
{
        IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq);

        return (qhead(&cl->cl_q));
}
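
/*
 * Discard packets belonging to the given flow (flow 0 means all packets)
 * from the class queue, updating drop counters and the interface queue
 * length accordingly.
 */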
static void
tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
        struct ifclassq *ifq = tif->tif_ifq;
        u_int32_t cnt = 0, len = 0, qlen;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        if ((qlen = qlen(&cl->cl_q)) == 0)
                goto done;

        /* become regular mutex before freeing mbufs */
        IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
        else
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
        else
                _flushq_flow(&cl->cl_q, flow, &cnt, &len);

        if (cnt > 0) {
                VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

                PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
                IFCQ_DROP_ADD(ifq, cnt, len);

                VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
                IFCQ_LEN(ifq) -= cnt;

                if (pktsched_verbose) {
                        log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
                            "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
                            if_name(TCQIF_IFP(tif)), tcq_style(tif),
                            cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
                            cnt, len, flow);
                }
        }
done:
        if (packets != NULL)
                *packets = cnt;
        if (bytes != NULL)
                *bytes = len;
}

static void
tcq_updateq(struct tcq_if *tif, struct tcq_class *cl, cqev_t ev)
{
        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        if (pktsched_verbose) {
                log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
                    if_name(TCQIF_IFP(tif)), tcq_style(tif),
                    cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
        }

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                return (rio_updateq(cl->cl_rio, ev));
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                return (red_updateq(cl->cl_red, ev));
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                return (blue_updateq(cl->cl_blue, ev));
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                return (sfb_updateq(cl->cl_sfb, ev));
}

int
tcq_get_class_stats(struct tcq_if *tif, u_int32_t qid,
    struct tcq_classstats *sp)
{
        struct tcq_class *cl;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        if ((cl = tcq_clh_to_clp(tif, qid)) == NULL)
                return (EINVAL);

        sp->class_handle = cl->cl_handle;
        sp->priority = cl->cl_pri;
        sp->qlength = qlen(&cl->cl_q);
        sp->qlimit = qlimit(&cl->cl_q);
        sp->period = cl->cl_period;
        sp->xmitcnt = cl->cl_xmitcnt;
        sp->dropcnt = cl->cl_dropcnt;

        sp->qtype = qtype(&cl->cl_q);
        sp->qstate = qstate(&cl->cl_q);
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                sfb_getstats(cl->cl_sfb, &sp->sfb);

        return (0);
}
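
/* report the queue length and byte count of the class backing a service class */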
static int
tcq_stat_sc(struct tcq_if *tif, cqrq_stat_sc_t *sr)
{
        struct ifclassq *ifq = tif->tif_ifq;
        struct tcq_class *cl;
        u_int32_t i;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc));

        i = MBUF_SCIDX(sr->sc);
        VERIFY(i < IFCQ_SC_MAX);

        cl = ifq->ifcq_disc_slots[i].cl;
        sr->packets = qlen(&cl->cl_q);
        sr->bytes = qsize(&cl->cl_q);

        return (0);
}

/* convert a class handle to the corresponding class pointer */
static inline struct tcq_class *
tcq_clh_to_clp(struct tcq_if *tif, u_int32_t chandle)
{
        struct tcq_class *cl;
        int idx;

        IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

        for (idx = tif->tif_maxpri; idx >= 0; idx--)
                if ((cl = tif->tif_classes[idx]) != NULL &&
                    cl->cl_handle == chandle)
                        return (cl);

        return (NULL);
}

static const char *
tcq_style(struct tcq_if *tif)
{
        return ((tif->tif_flags & TCQIFF_ALTQ) ? "ALTQ_TCQ" : "TCQ");
}

/*
 * tcq_enqueue_ifclassq is an enqueue function to be registered to
 * (*ifcq_enqueue) in struct ifclassq.
 */
static int
tcq_enqueue_ifclassq(struct ifclassq *ifq, struct mbuf *m)
{
        u_int32_t i;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        if (!(m->m_flags & M_PKTHDR)) {
                /* should not happen */
                log(LOG_ERR, "%s: packet does not have pkthdr\n",
                    if_name(ifq->ifcq_ifp));
                IFCQ_CONVERT_LOCK(ifq);
                m_freem(m);
                return (ENOBUFS);
        }

        i = MBUF_SCIDX(mbuf_get_service_class(m));
        VERIFY((u_int32_t)i < IFCQ_SC_MAX);

        return (tcq_enqueue(ifq->ifcq_disc,
            ifq->ifcq_disc_slots[i].cl, m, m_pftag(m)));
}
/*
 * tcq_dequeue_tc_ifclassq is a dequeue function to be registered to
 * (*ifcq_dequeue) in struct ifclassq.
 *
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *      from the queue. CLASSQDQ_REMOVE is a normal dequeue operation.
 *      CLASSQDQ_REMOVE must return the same packet if called immediately
 *      after CLASSQDQ_POLL.
 */
static struct mbuf *
tcq_dequeue_tc_ifclassq(struct ifclassq *ifq, mbuf_svc_class_t sc,
    cqdq_op_t op)
{
        u_int32_t i = MBUF_SCIDX(sc);

        VERIFY((u_int32_t)i < IFCQ_SC_MAX);

        return (tcq_dequeue_cl(ifq->ifcq_disc,
            ifq->ifcq_disc_slots[i].cl, sc, op));
}

static int
tcq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg)
{
        struct tcq_if *tif = (struct tcq_if *)ifq->ifcq_disc;
        int err = 0;

        IFCQ_LOCK_ASSERT_HELD(ifq);

        switch (req) {
        case CLASSQRQ_PURGE:
                tcq_purge(tif);
                break;

        case CLASSQRQ_PURGE_SC:
                tcq_purge_sc(tif, (cqrq_purge_sc_t *)arg);
                break;

        case CLASSQRQ_EVENT:
                tcq_event(tif, (cqev_t)arg);
                break;

        case CLASSQRQ_THROTTLE:
                err = tcq_throttle(tif, (cqrq_throttle_t *)arg);
                break;

        case CLASSQRQ_STAT_SC:
                err = tcq_stat_sc(tif, (cqrq_stat_sc_t *)arg);
                break;
        }
        return (err);
}
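
/*
 * Attach a TCQ scheduler to the interface queue: create the four traffic
 * class queues (BK, BE, VI, VO) and cache the mapping from each mbuf
 * service class to its queue in ifcq_disc_slots[] for fast lookup.
 */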
int
tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
        struct ifnet *ifp = ifq->ifcq_ifp;
        struct tcq_class *cl0, *cl1, *cl2, *cl3;
        struct tcq_if *tif;
        u_int32_t maxlen = 0, qflags = 0;
        int err = 0;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(ifq->ifcq_disc == NULL);
        VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

        if (flags & PKTSCHEDF_QALG_RED)
                qflags |= TQCF_RED;
        if (flags & PKTSCHEDF_QALG_RIO)
                qflags |= TQCF_RIO;
        if (flags & PKTSCHEDF_QALG_BLUE)
                qflags |= TQCF_BLUE;
        if (flags & PKTSCHEDF_QALG_SFB)
                qflags |= TQCF_SFB;
        if (flags & PKTSCHEDF_QALG_ECN)
                qflags |= TQCF_ECN;
        if (flags & PKTSCHEDF_QALG_FLOWCTL)
                qflags |= TQCF_FLOWCTL;
        if (flags & PKTSCHEDF_QALG_DELAYBASED)
                qflags |= TQCF_DELAYBASED;

        tif = tcq_alloc(ifp, M_WAITOK, FALSE);
        if (tif == NULL)
                return (ENOMEM);

        if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
                maxlen = if_sndq_maxlen;

        if ((err = tcq_add_queue(tif, 0, maxlen,
            qflags | PRCF_LAZY, SCIDX_BK, &cl0)) != 0)
                goto cleanup;

        if ((err = tcq_add_queue(tif, 1, maxlen,
            qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1)) != 0)
                goto cleanup;

        if ((err = tcq_add_queue(tif, 2, maxlen,
            qflags | PRCF_LAZY, SCIDX_VI, &cl2)) != 0)
                goto cleanup;

        if ((err = tcq_add_queue(tif, 3, maxlen,
            qflags, SCIDX_VO, &cl3)) != 0)
                goto cleanup;

        err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tif,
            tcq_enqueue_ifclassq, NULL, tcq_dequeue_tc_ifclassq,
            tcq_request_ifclassq);

        /* cache these for faster lookup */
        if (err == 0) {
                /* Map {BK_SYS,BK} to TC_BK */
                ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK;
                ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;

                ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
                ifq->ifcq_disc_slots[SCIDX_BK].cl = cl0;

                /* Map {BE,RD,OAM} to TC_BE */
                ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
                ifq->ifcq_disc_slots[SCIDX_BE].cl = cl1;

                ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_BE;
                ifq->ifcq_disc_slots[SCIDX_RD].cl = cl1;

                ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_BE;
                ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl1;

                /* Map {AV,RV,VI} to TC_VI */
                ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_VI;
                ifq->ifcq_disc_slots[SCIDX_AV].cl = cl2;

                ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_VI;
                ifq->ifcq_disc_slots[SCIDX_RV].cl = cl2;

                ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
                ifq->ifcq_disc_slots[SCIDX_VI].cl = cl2;

                /* Map {VO,CTL} to TC_VO */
                ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
                ifq->ifcq_disc_slots[SCIDX_VO].cl = cl3;

                ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_VO;
                ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl3;
        }

cleanup:
        if (err != 0)
                (void) tcq_destroy_locked(tif);

        return (err);
}

int
tcq_teardown_ifclassq(struct ifclassq *ifq)
{
        struct tcq_if *tif = ifq->ifcq_disc;
        int i;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(tif != NULL && ifq->ifcq_type == PKTSCHEDT_TCQ);

        (void) tcq_destroy_locked(tif);

        ifq->ifcq_disc = NULL;
        for (i = 0; i < IFCQ_SC_MAX; i++) {
                ifq->ifcq_disc_slots[i].qid = 0;
                ifq->ifcq_disc_slots[i].cl = NULL;
        }

        return (ifclassq_detach(ifq));
}

int
tcq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
        struct tcq_if *tif = ifq->ifcq_disc;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(ifq->ifcq_type == PKTSCHEDT_TCQ);

        if (slot >= IFCQ_SC_MAX)
                return (EINVAL);

        return (tcq_get_class_stats(tif, ifq->ifcq_disc_slots[slot].qid,
            &ifqs->ifqs_tcq_stats));
}
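
/*
 * Get or set the interface throttling level.  Only the BK_SYS class is
 * suspended or resumed; ENXIO from the suspend path indicates a lazily
 * allocated SFB queue that is not set up yet, in which case throttling
 * takes effect later (see tcq_addq).
 */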
static int
tcq_throttle(struct tcq_if *tif, cqrq_throttle_t *tr)
{
        struct ifclassq *ifq = tif->tif_ifq;
        struct tcq_class *cl;
        int err = 0;

        IFCQ_LOCK_ASSERT_HELD(ifq);
        VERIFY(!(tif->tif_flags & TCQIFF_ALTQ));

        if (!tr->set) {
                tr->level = tif->tif_throttle;
                return (0);
        }

        if (tr->level == tif->tif_throttle)
                return (EALREADY);

        /* Current throttling levels only involve BK_SYS class */
        cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl;

        switch (tr->level) {
        case IFNET_THROTTLE_OFF:
                err = tcq_resumeq(tif, cl);
                break;

        case IFNET_THROTTLE_OPPORTUNISTIC:
                err = tcq_suspendq(tif, cl);
                break;

        default:
                VERIFY(0);
                /* NOTREACHED */
        }

        if (err == 0 || err == ENXIO) {
                if (pktsched_verbose) {
                        log(LOG_DEBUG, "%s: %s throttling %slevel set %d->%d\n",
                            if_name(TCQIF_IFP(tif)), tcq_style(tif),
                            (err == 0) ? "" : "lazy ", tif->tif_throttle,
                            tr->level);
                }
                tif->tif_throttle = tr->level;
                if (err != 0)
                        err = 0;
                else
                        tcq_purgeq(tif, cl, 0, NULL, NULL);
        } else {
                log(LOG_ERR, "%s: %s unable to set throttling level "
                    "%d->%d [error=%d]\n", if_name(TCQIF_IFP(tif)),
                    tcq_style(tif), tif->tif_throttle, tr->level, err);
        }

        return (err);
}

static int
tcq_resumeq(struct tcq_if *tif, struct tcq_class *cl)
{
        struct ifclassq *ifq = tif->tif_ifq;
        int err = 0;

        IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                err = rio_suspendq(cl->cl_rio, &cl->cl_q, FALSE);
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                err = red_suspendq(cl->cl_red, &cl->cl_q, FALSE);
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                err = blue_suspendq(cl->cl_blue, &cl->cl_q, FALSE);
        else
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
                err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE);

        if (err == 0)
                qstate(&cl->cl_q) = QS_RUNNING;

        return (err);
}

static int
tcq_suspendq(struct tcq_if *tif, struct tcq_class *cl)
{
        struct ifclassq *ifq = tif->tif_ifq;
        int err = 0;

        IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
        if (q_is_rio(&cl->cl_q))
                err = rio_suspendq(cl->cl_rio, &cl->cl_q, TRUE);
        else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
        if (q_is_red(&cl->cl_q))
                err = red_suspendq(cl->cl_red, &cl->cl_q, TRUE);
        else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
        if (q_is_blue(&cl->cl_q))
                err = blue_suspendq(cl->cl_blue, &cl->cl_q, TRUE);
        else
#endif /* CLASSQ_BLUE */
        if (q_is_sfb(&cl->cl_q)) {
                if (cl->cl_sfb != NULL) {
                        err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE);
                } else {
                        VERIFY(cl->cl_flags & TQCF_LAZY);
                        err = ENXIO;    /* delayed throttling */
                }
        }

        if (err == 0 || err == ENXIO)
                qstate(&cl->cl_q) = QS_SUSPENDED;

        return (err);
}