/*
 * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>
#include <net/classq/classq.h>
#include <pexpert/pexpert.h>
#include <net/classq/classq_sfb.h>
#include <net/classq/classq_fq_codel.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>

#include <libkern/libkern.h>


static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, u_int32_t, void **, void **, u_int32_t *, u_int32_t *,
    boolean_t, classq_pkt_type_t *);
static void *ifclassq_tbr_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    boolean_t, classq_pkt_type_t *);

static u_int64_t ifclassq_target_qdelay = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
    &ifclassq_target_qdelay, "target queue delay in nanoseconds");

static u_int64_t ifclassq_update_interval = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval,
    CTLFLAG_RW|CTLFLAG_LOCKED, &ifclassq_update_interval,
    "update interval in nanoseconds");

static int32_t ifclassq_sched_fq_codel;

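/*
 * One-time initialization of the classq subsystem: sanity-check the
 * service class constants, initialize the SFB and FQ-CoDel modules,
 * and read the "fq_codel" boot-arg (defaulting to 1, i.e. FQ-CoDel is
 * preferred when the boot-arg is absent).
 */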
void
classq_init(void)
{
    _CASSERT(MBUF_TC_BE == 0);
    _CASSERT(MBUF_SC_BE == 0);
    _CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);

    sfb_init();
    fq_codel_scheduler_init();

    if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel,
        sizeof (ifclassq_sched_fq_codel)))
        ifclassq_sched_fq_codel = 1;
}

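/*
 * Set up the send queue (if_snd) of an interface: record the owning
 * ifnet, clear counters, pick a maximum queue length, and, for
 * TXSTART interfaces, instantiate the packet scheduler.
 */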
int
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
{
#pragma unused(reuse)
    struct ifclassq *ifq = &ifp->if_snd;
    int err = 0;

    IFCQ_LOCK(ifq);
    VERIFY(IFCQ_IS_EMPTY(ifq));
    ifq->ifcq_ifp = ifp;
    IFCQ_LEN(ifq) = 0;
    IFCQ_BYTES(ifq) = 0;
    bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(ifq->ifcq_enqueue == NULL);
    VERIFY(ifq->ifcq_dequeue == NULL);
    VERIFY(ifq->ifcq_dequeue_sc == NULL);
    VERIFY(ifq->ifcq_request == NULL);

    if (ifp->if_eflags & IFEF_TXSTART) {
        u_int32_t maxlen = 0;

        if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
            maxlen = if_sndq_maxlen;
        IFCQ_SET_MAXLEN(ifq, maxlen);

        if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
            IFCQ_TARGET_QDELAY(ifq) == 0) {
            /*
             * Choose static queues because the interface has
             * a maximum queue size set.
             */
            sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
        }
        ifq->ifcq_sflags = sflags;
        err = ifclassq_pktsched_setup(ifq);
        if (err == 0)
            ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
    }
    IFCQ_UNLOCK(ifq);
    return (err);
}

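/*
 * Tear down the send queue of a detaching interface: disable any
 * token bucket regulator, tear down the scheduler, and reset the
 * queue back to its pristine (empty, unconfigured) state.
 */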
void
ifclassq_teardown(struct ifnet *ifp)
{
    struct ifclassq *ifq = &ifp->if_snd;

    IFCQ_LOCK(ifq);

    if (IFCQ_IS_READY(ifq)) {
        if (IFCQ_TBR_IS_ENABLED(ifq)) {
            struct tb_profile tb = { 0, 0, 0 };
            (void) ifclassq_tbr_set(ifq, &tb, FALSE);
        }
        (void) pktsched_teardown(ifq);
        ifq->ifcq_flags = 0;
    }
    ifq->ifcq_sflags = 0;

    VERIFY(IFCQ_IS_EMPTY(ifq));
    VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
    VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
    VERIFY(ifq->ifcq_flags == 0);
    VERIFY(ifq->ifcq_sflags == 0);
    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(ifq->ifcq_enqueue == NULL);
    VERIFY(ifq->ifcq_dequeue == NULL);
    VERIFY(ifq->ifcq_dequeue_sc == NULL);
    VERIFY(ifq->ifcq_request == NULL);
    IFCQ_LEN(ifq) = 0;
    IFCQ_BYTES(ifq) = 0;
    IFCQ_MAXLEN(ifq) = 0;
    bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
    bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

    IFCQ_UNLOCK(ifq);
}

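/*
 * Instantiate the packet scheduler for a send queue based on the
 * interface's output scheduling model: TCQ or FQ-CoDel for
 * driver-managed interfaces, QFQ or FQ-CoDel for the normal model,
 * and FQ-CoDel when it is requested explicitly.
 */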
int
ifclassq_pktsched_setup(struct ifclassq *ifq)
{
    struct ifnet *ifp = ifq->ifcq_ifp;
    classq_pkt_type_t ptype = QP_MBUF;
    int err = 0;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(ifp->if_eflags & IFEF_TXSTART);

    switch (ifp->if_output_sched_model) {
    case IFNET_SCHED_MODEL_DRIVER_MANAGED:
        if (ifclassq_sched_fq_codel != 0) {
            err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
                ifq->ifcq_sflags, ptype);
        } else {
            err = pktsched_setup(ifq, PKTSCHEDT_TCQ,
                ifq->ifcq_sflags, ptype);
        }
        break;

    case IFNET_SCHED_MODEL_NORMAL:
        if (ifclassq_sched_fq_codel != 0) {
            err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
                ifq->ifcq_sflags, ptype);
        } else {
            err = pktsched_setup(ifq, PKTSCHEDT_QFQ,
                ifq->ifcq_sflags, ptype);
        }
        break;

    case IFNET_SCHED_MODEL_FQ_CODEL:
        err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
            ifq->ifcq_sflags, ptype);
        break;

    default:
        VERIFY(0);
        /* NOTREACHED */
    }

    return (err);
}

void
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
{
    IFCQ_LOCK(ifq);
    if (maxqlen == 0)
        maxqlen = if_sndq_maxlen;
    IFCQ_SET_MAXLEN(ifq, maxqlen);
    IFCQ_UNLOCK(ifq);
}

u_int32_t
ifclassq_get_maxlen(struct ifclassq *ifq)
{
    return (IFCQ_MAXLEN(ifq));
}

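/*
 * Return the number of queued packets and, for a specific service
 * class, the byte count as well.
 */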
int
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
{
    int err = 0;

    IFCQ_LOCK(ifq);
    if (sc == MBUF_SC_UNSPEC) {
        VERIFY(packets != NULL);
        *packets = IFCQ_LEN(ifq);
    } else {
        VERIFY(MBUF_VALID_SC(sc));
        VERIFY(packets != NULL && bytes != NULL);
        IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
    }
    IFCQ_UNLOCK(ifq);

    return (err);
}

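/*
 * On cellular interfaces, mark each dequeued packet as carrying valid
 * unsent-data information and stamp it with the current interface
 * queue byte count and the interface's unsent send-byte count.
 */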
inline void
ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp,
    void *p, classq_pkt_type_t ptype)
{
    if (!IFNET_IS_CELLULAR(ifp))
        return;

    switch (ptype) {
    case QP_MBUF: {
        struct mbuf *m = p;
        m->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
        m->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq);
        m->m_pkthdr.bufstatus_sndbuf = ifp->if_sndbyte_unsent;
        break;
    }

    default:
        VERIFY(0);
        /* NOTREACHED */
    }
}

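/*
 * Enqueue a packet on the interface send queue under the queue lock;
 * the scheduler's enqueue routine decides whether the packet is
 * accepted or dropped, and *pdrop reflects the outcome.
 */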
errno_t
ifclassq_enqueue(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype,
    boolean_t *pdrop)
{
    errno_t err;

    switch (ptype) {
    case QP_MBUF:
        IFCQ_LOCK_SPIN(ifq);
        break;

    default:
        IFCQ_LOCK(ifq);
        break;
    }

    IFCQ_ENQUEUE(ifq, p, ptype, err, pdrop);
    IFCQ_UNLOCK(ifq);
    return (err);
}

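/*
 * Dequeue wrappers: ifclassq_dequeue() services the queue as a whole,
 * while ifclassq_dequeue_sc() restricts the dequeue to a single
 * service class.  Both are bounded by packet and byte limits.
 */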
errno_t
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit,
    u_int32_t byte_limit, void **head, void **tail,
    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
{
    return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit,
        byte_limit, head, tail, cnt, len, FALSE, ptype));
}

errno_t
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, void **head, void **tail,
    u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype)
{
    return (ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit,
        head, tail, cnt, len, TRUE, ptype));
}

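/*
 * Common dequeue path.  If no TBR is active and the scheduler
 * provides a multi-packet dequeue method, hand the limits straight to
 * it; otherwise dequeue one packet at a time under the queue lock,
 * chaining packets and accumulating byte counts until either limit is
 * reached.  Returns EAGAIN when nothing could be dequeued.
 */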
static errno_t
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, void **head,
    void **tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt,
    classq_pkt_type_t *ptype)
{
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int32_t i = 0, l = 0, lock_spin = 1;
    void **first, *last;

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));

    *ptype = 0;

    if (IFCQ_TBR_IS_ENABLED(ifq))
        goto dequeue_loop;

    /*
     * If the scheduler supports dequeueing multiple packets at the
     * same time, call that one instead.
     */
    if (drvmgt && ifq->ifcq_dequeue_sc_multi != NULL) {
        int err;

        if (lock_spin)
            IFCQ_LOCK_SPIN(ifq);
        else
            IFCQ_LOCK(ifq);
        err = ifq->ifcq_dequeue_sc_multi(ifq, sc, pkt_limit,
            byte_limit, head, tail, cnt, len, ptype);
        IFCQ_UNLOCK(ifq);

        if (err == 0 && (*head) == NULL)
            err = EAGAIN;
        return (err);
    } else if (ifq->ifcq_dequeue_multi != NULL) {
        int err;

        if (lock_spin)
            IFCQ_LOCK_SPIN(ifq);
        else
            IFCQ_LOCK(ifq);

        err = ifq->ifcq_dequeue_multi(ifq, pkt_limit, byte_limit,
            head, tail, cnt, len, ptype);
        IFCQ_UNLOCK(ifq);

        if (err == 0 && (*head) == NULL)
            err = EAGAIN;
        return (err);
    }

dequeue_loop:
    *head = NULL;
    first = &(*head);
    last = NULL;

    if (lock_spin)
        IFCQ_LOCK_SPIN(ifq);
    else
        IFCQ_LOCK(ifq);

    while (i < pkt_limit && l < byte_limit) {
        classq_pkt_type_t tmp_ptype;
        if (drvmgt) {
            if (IFCQ_TBR_IS_ENABLED(ifq))
                IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
            else
                IFCQ_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype);
        } else {
            if (IFCQ_TBR_IS_ENABLED(ifq))
                IFCQ_TBR_DEQUEUE(ifq, *head, &tmp_ptype);
            else
                IFCQ_DEQUEUE(ifq, *head, &tmp_ptype);
        }

        if (*head == NULL)
            break;

        switch (tmp_ptype) {
        case QP_MBUF:
            (*((mbuf_t *)head))->m_nextpkt = NULL;
            last = *head;
            l += (*((mbuf_t *)head))->m_pkthdr.len;
            ifclassq_set_packet_metadata(ifq, ifp, (*head),
                QP_MBUF);
            head = (void **)&(*((mbuf_t *)head))->m_nextpkt;
            break;

        default:
            VERIFY(0);
            /* NOTREACHED */
        }

        *ptype = tmp_ptype;
        i++;
    }

    IFCQ_UNLOCK(ifq);

    if (tail != NULL)
        *tail = last;
    if (cnt != NULL)
        *cnt = i;
    if (len != NULL)
        *len = l;

    return ((*first != NULL) ? 0 : EAGAIN);
}

void
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));
    IFCQ_UPDATE(ifq, ev);
}

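/*
 * Attach a scheduler instance to the send queue by recording its
 * discipline state and method table; ifclassq_detach() undoes this
 * once the discipline itself has been torn down.
 */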
int
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
    ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
    ifclassq_deq_sc_func dequeue_sc, ifclassq_deq_multi_func dequeue_multi,
    ifclassq_deq_sc_multi_func dequeue_sc_multi, ifclassq_req_func request)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);
    VERIFY(enqueue != NULL);
    VERIFY(request != NULL);

    ifq->ifcq_type = type;
    ifq->ifcq_disc = discipline;
    ifq->ifcq_enqueue = enqueue;
    ifq->ifcq_dequeue = dequeue;
    ifq->ifcq_dequeue_sc = dequeue_sc;
    ifq->ifcq_dequeue_multi = dequeue_multi;
    ifq->ifcq_dequeue_sc_multi = dequeue_sc_multi;
    ifq->ifcq_request = request;

    return (0);
}

int
ifclassq_detach(struct ifclassq *ifq)
{
    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(ifq->ifcq_disc == NULL);

    ifq->ifcq_type = PKTSCHEDT_NONE;
    ifq->ifcq_disc = NULL;
    ifq->ifcq_enqueue = NULL;
    ifq->ifcq_dequeue = NULL;
    ifq->ifcq_dequeue_sc = NULL;
    ifq->ifcq_request = NULL;

    return (0);
}

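/*
 * Copy the queue statistics for a given queue id out to user space.
 * On entry *nbytes must be at least sizeof (struct if_ifclassq_stats);
 * on success it is set to the number of bytes copied out.
 */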
int
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
{
    struct if_ifclassq_stats *ifqs;
    int err;

    if (*nbytes < sizeof (*ifqs))
        return (EINVAL);

    ifqs = _MALLOC(sizeof (*ifqs), M_TEMP, M_WAITOK | M_ZERO);
    if (ifqs == NULL)
        return (ENOMEM);

    IFCQ_LOCK(ifq);
    if (!IFCQ_IS_READY(ifq)) {
        IFCQ_UNLOCK(ifq);
        _FREE(ifqs, M_TEMP);
        return (ENXIO);
    }

    ifqs->ifqs_len = IFCQ_LEN(ifq);
    ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
    *(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
    *(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
    ifqs->ifqs_scheduler = ifq->ifcq_type;

    err = pktsched_getqstats(ifq, qid, ifqs);
    IFCQ_UNLOCK(ifq);

    if (err == 0 && (err = copyout((caddr_t)ifqs,
        (user_addr_t)(uintptr_t)ubuf, sizeof (*ifqs))) == 0)
        *nbytes = sizeof (*ifqs);

    _FREE(ifqs, M_TEMP);

    return (err);
}

const char *
ifclassq_ev2str(cqev_t ev)
{
    const char *c;

    switch (ev) {
    case CLASSQ_EV_LINK_BANDWIDTH:
        c = "LINK_BANDWIDTH";
        break;

    case CLASSQ_EV_LINK_LATENCY:
        c = "LINK_LATENCY";
        break;

    case CLASSQ_EV_LINK_MTU:
        c = "LINK_MTU";
        break;

    case CLASSQ_EV_LINK_UP:
        c = "LINK_UP";
        break;

    case CLASSQ_EV_LINK_DOWN:
        c = "LINK_DOWN";
        break;

    default:
        c = "UNKNOWN";
        break;
    }

    return (c);
}

/*
 * Internal representation of token bucket parameters:
 *	rate:	bytes per machclk tick, scaled by 2^32, i.e.
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	bytes << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)

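/*
 * Illustrative example (assuming a hypothetical machclk_freq of
 * 1,000,000,000 ticks/sec): a 1 Gbit/s profile gives
 *	tbr_rate = TBR_SCALE(1e9 / 8) / 1e9 = 0.125 * 2^32
 * i.e. one-eighth of a byte of credit accrues per clock tick, carried
 * in the lower 32 fraction bits so the integer math stays exact.
 */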
void *
ifclassq_tbr_dequeue(struct ifclassq *ifq, classq_pkt_type_t *ptype)
{
    return (ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, ptype));
}

void *
ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    classq_pkt_type_t *ptype)
{
    return (ifclassq_tbr_dequeue_common(ifq, sc, TRUE, ptype));
}

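/*
 * Token-bucket-regulated dequeue: when the token count has gone
 * non-positive, replenish it based on the time elapsed since the last
 * refill; refuse to dequeue while it remains non-positive, and
 * otherwise charge the dequeued packet's length against the bucket.
 */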
static void *
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt, classq_pkt_type_t *ptype)
{
    struct tb_regulator *tbr;
    void *p;
    int64_t interval;
    u_int64_t now;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    VERIFY(!drvmgt || MBUF_VALID_SC(sc));
    VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

    tbr = &ifq->ifcq_tbr;
    /* update token only when it is negative */
    if (tbr->tbr_token <= 0) {
        now = read_machclk();
        interval = now - tbr->tbr_last;
        if (interval >= tbr->tbr_filluptime) {
            tbr->tbr_token = tbr->tbr_depth;
        } else {
            tbr->tbr_token += interval * tbr->tbr_rate;
            if (tbr->tbr_token > tbr->tbr_depth)
                tbr->tbr_token = tbr->tbr_depth;
        }
        tbr->tbr_last = now;
    }
    /* if token is still negative, don't allow dequeue */
    if (tbr->tbr_token <= 0)
        return (NULL);

    /*
     * ifclassq takes precedence over ALTQ queue;
     * ifcq_drain count is adjusted by the caller.
     */
    if (drvmgt)
        IFCQ_DEQUEUE_SC(ifq, sc, p, ptype);
    else
        IFCQ_DEQUEUE(ifq, p, ptype);

    if (p != NULL) {
        switch (*ptype) {
        case QP_MBUF:
            tbr->tbr_token -= TBR_SCALE(m_pktlen((mbuf_t)p));
            break;

        default:
            VERIFY(0);
            /* NOTREACHED */
        }
    }

    return (p);
}

/*
 * Set a token bucket regulator.
 * If the specified rate is zero, the token bucket regulator is deleted.
 */
int
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
{
    struct tb_regulator *tbr;
    struct ifnet *ifp = ifq->ifcq_ifp;
    u_int64_t rate, old_rate;

    IFCQ_LOCK_ASSERT_HELD(ifq);
    VERIFY(IFCQ_IS_READY(ifq));

    VERIFY(machclk_freq != 0);

    tbr = &ifq->ifcq_tbr;
    old_rate = tbr->tbr_rate_raw;

    rate = profile->rate;
    if (profile->percent > 0) {
        u_int64_t eff_rate;

        if (profile->percent > 100)
            return (EINVAL);
        if ((eff_rate = ifp->if_output_bw.eff_bw) == 0)
            return (ENODEV);
        rate = (eff_rate * profile->percent) / 100;
    }

    if (rate == 0) {
        if (!IFCQ_TBR_IS_ENABLED(ifq))
            return (ENOENT);

        if (pktsched_verbose)
            printf("%s: TBR disabled\n", if_name(ifp));

        /* disable this TBR */
        ifq->ifcq_flags &= ~IFCQF_TBR;
        bzero(tbr, sizeof (*tbr));
        ifnet_set_start_cycle(ifp, NULL);
        if (update)
            ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
        return (0);
    }

    if (pktsched_verbose) {
        printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
            (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
            "enabled", rate, profile->depth);
    }

    /* set the new TBR */
    bzero(tbr, sizeof (*tbr));
    tbr->tbr_rate_raw = rate;
    tbr->tbr_percent = profile->percent;
    ifq->ifcq_flags |= IFCQF_TBR;

    /*
     * Note that the TBR fill up time (hence the ifnet restart time)
     * is directly related to the specified TBR depth.  The ideal
     * depth value should be computed such that the interval time
     * between each successive wakeup is adequately spaced apart,
     * in order to reduce scheduling overheads.  A target interval
     * of 10 ms seems to provide good performance balance.  This can be
     * overridden by specifying the depth profile.  Values smaller than
     * the ideal depth will reduce delay at the expense of CPU cycles.
     */
    tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
    if (tbr->tbr_rate > 0) {
        u_int32_t mtu = ifp->if_mtu;
        int64_t ival, idepth = 0;
        int i;

        if (mtu < IF_MINMTU)
            mtu = IF_MINMTU;

        ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

        for (i = 1; ; i++) {
            idepth = TBR_SCALE(i * mtu);
            if ((idepth / tbr->tbr_rate) > ival)
                break;
        }
        VERIFY(idepth > 0);

        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_depth == 0) {
            tbr->tbr_filluptime = idepth / tbr->tbr_rate;
            /* a little fudge factor to get closer to rate */
            tbr->tbr_depth = idepth + (idepth >> 3);
        } else {
            tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        }
    } else {
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        tbr->tbr_filluptime = 0xffffffffffffffffLL;
    }
    tbr->tbr_token = tbr->tbr_depth;
    tbr->tbr_last = read_machclk();

    if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
        struct timespec ts =
            { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
        if (pktsched_verbose) {
            printf("%s: TBR calculated tokens %lld "
                "filluptime %llu ns\n", if_name(ifp),
                TBR_UNSCALE(tbr->tbr_token),
                pktsched_abs_to_nsecs(tbr->tbr_filluptime));
        }
        ifnet_set_start_cycle(ifp, &ts);
    } else {
        if (pktsched_verbose) {
            if (tbr->tbr_rate == 0) {
                printf("%s: TBR calculated tokens %lld "
                    "infinite filluptime\n", if_name(ifp),
                    TBR_UNSCALE(tbr->tbr_token));
            } else if (!(ifp->if_flags & IFF_UP)) {
                printf("%s: TBR suspended (link is down)\n",
                    if_name(ifp));
            }
        }
        ifnet_set_start_cycle(ifp, NULL);
    }
    if (update && tbr->tbr_rate_raw != old_rate)
        ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);

    return (0);
}

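/*
 * Compute the target queueing delay for an interface: start from the
 * per-ifnet value, let the net.classq.target_qdelay sysctl override
 * it, fall back to IFQ_TARGET_DELAY, and widen the target by the
 * start callback's coalescing delay when enqueue batching is enabled.
 */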
void
ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay)
{
    u_int64_t qdelay = 0;
    qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

    if (ifclassq_target_qdelay != 0)
        qdelay = ifclassq_target_qdelay;

    /*
     * If we do not know the effective bandwidth, use the default
     * target queue delay.
     */
    if (qdelay == 0)
        qdelay = IFQ_TARGET_DELAY;

    /*
     * If a delay has been added to ifnet start callback for
     * coalescing, we have to add that to the pre-set target delay
     * because the packets can be in the queue longer.
     */
    if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
        ifp->if_start_delay_timeout > 0)
        qdelay += ifp->if_start_delay_timeout;

    *(if_target_qdelay) = qdelay;
}

void
ifclassq_calc_update_interval(u_int64_t *update_interval)
{
    u_int64_t uint = 0;

    /* If the system level override is set, use it */
    if (ifclassq_update_interval != 0)
        uint = ifclassq_update_interval;

    /* Otherwise use the default value */
    if (uint == 0)
        uint = IFQ_UPDATE_INTERVAL;

    *update_interval = uint;
}

void
ifclassq_reap_caches(boolean_t purge)
{
    fq_codel_reap_caches(purge);
}