/*
 * Copyright (c) 2011-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>
#include <net/classq/classq.h>
#include <pexpert/pexpert.h>
#if CLASSQ_RED
#include <net/classq/classq_red.h>
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
#include <net/classq/classq_rio.h>
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
#include <net/classq/classq_blue.h>
#endif /* CLASSQ_BLUE */
#include <net/classq/classq_sfb.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>

#include <libkern/libkern.h>

#if PF_ALTQ
#include <net/altq/altq.h>
#endif /* PF_ALTQ */

static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *,
    u_int32_t *, boolean_t);
static struct mbuf *ifclassq_poll_common(struct ifclassq *,
    mbuf_svc_class_t, boolean_t);
static struct mbuf *ifclassq_tbr_dequeue_common(struct ifclassq *, int,
    mbuf_svc_class_t, boolean_t);

static u_int64_t ifclassq_target_qdelay = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
    &ifclassq_target_qdelay, "target queue delay in nanoseconds");

static u_int64_t ifclassq_update_interval = 0;
SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval,
    CTLFLAG_RW|CTLFLAG_LOCKED, &ifclassq_update_interval,
    "update interval in nanoseconds");

static int32_t ifclassq_sched_fq_codel;

void
classq_init(void)
{
	_CASSERT(MBUF_TC_BE == 0);
	_CASSERT(MBUF_SC_BE == 0);
	_CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);

#if CLASSQ_RED
	red_init();
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	rio_init();
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	blue_init();
#endif /* CLASSQ_BLUE */
	sfb_init();
	fq_codel_scheduler_init();

	if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel,
	    sizeof (ifclassq_sched_fq_codel)))
		ifclassq_sched_fq_codel = 0;
}

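/*
 * Usage note (illustrative; not part of the original source): the
 * "fq_codel" boot-arg parsed above steers scheduler selection for
 * IFNET_SCHED_MODEL_NORMAL interfaces in ifclassq_pktsched_setup(),
 * e.g.:
 *
 *	nvram boot-args="fq_codel=1"
 */
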
int
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
{
#pragma unused(reuse)
	struct ifclassq *ifq = &ifp->if_snd;
	int err = 0;

	IFCQ_LOCK(ifq);
	VERIFY(IFCQ_IS_EMPTY(ifq));
	ifq->ifcq_ifp = ifp;
	IFCQ_LEN(ifq) = 0;
	IFCQ_BYTES(ifq) = 0;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);

	if (ifp->if_eflags & IFEF_TXSTART) {
		u_int32_t maxlen = 0;

		if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
			maxlen = if_sndq_maxlen;
		IFCQ_SET_MAXLEN(ifq, maxlen);

		if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
		    IFCQ_TARGET_QDELAY(ifq) == 0) {
			/*
			 * Choose static queueing because the interface
			 * has a maximum queue size set.
			 */
			sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
		}
		ifq->ifcq_sflags = sflags;
		err = ifclassq_pktsched_setup(ifq);
		if (err == 0)
			ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
	}

#if PF_ALTQ
	ifq->ifcq_drain = 0;
	IFCQ_ALTQ(ifq)->altq_ifcq = ifq;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);

	if ((ifp->if_eflags & IFEF_TXSTART) &&
	    ifp->if_output_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED)
		ALTQ_SET_READY(IFCQ_ALTQ(ifq));
	else
		ALTQ_CLEAR_READY(IFCQ_ALTQ(ifq));
#endif /* PF_ALTQ */
	IFCQ_UNLOCK(ifq);

	return (err);
}

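/*
 * Note (an illustrative assumption about the wider ifnet model, not
 * from the original source): only interfaces with IFEF_TXSTART, i.e.
 * those using the start-callback output model, get a scheduler set up
 * above; legacy interfaces hand packets directly to the driver and do
 * not go through this transmit classq.
 */
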
void
ifclassq_teardown(struct ifnet *ifp)
{
	struct ifclassq *ifq = &ifp->if_snd;

	IFCQ_LOCK(ifq);
#if PF_ALTQ
	if (ALTQ_IS_READY(IFCQ_ALTQ(ifq))) {
		if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
			altq_disable(IFCQ_ALTQ(ifq));
		if (ALTQ_IS_ATTACHED(IFCQ_ALTQ(ifq)))
			altq_detach(IFCQ_ALTQ(ifq));
		IFCQ_ALTQ(ifq)->altq_flags = 0;
	}
	ifq->ifcq_drain = 0;
	IFCQ_ALTQ(ifq)->altq_ifcq = NULL;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
#endif /* PF_ALTQ */

	if (IFCQ_IS_READY(ifq)) {
		if (IFCQ_TBR_IS_ENABLED(ifq)) {
			struct tb_profile tb = { 0, 0, 0 };
			(void) ifclassq_tbr_set(ifq, &tb, FALSE);
		}
		(void) pktsched_teardown(ifq);
		ifq->ifcq_flags = 0;
	}
	ifq->ifcq_sflags = 0;

	VERIFY(IFCQ_IS_EMPTY(ifq));
	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);
	IFCQ_LEN(ifq) = 0;
	IFCQ_BYTES(ifq) = 0;
	IFCQ_MAXLEN(ifq) = 0;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	IFCQ_UNLOCK(ifq);
}

int
ifclassq_pktsched_setup(struct ifclassq *ifq)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifp->if_eflags & IFEF_TXSTART);

	switch (ifp->if_output_sched_model) {
	case IFNET_SCHED_MODEL_DRIVER_MANAGED:
		err = pktsched_setup(ifq, PKTSCHEDT_TCQ, ifq->ifcq_sflags);
		break;

	case IFNET_SCHED_MODEL_NORMAL:
		if (ifclassq_sched_fq_codel != 0) {
			err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
			    ifq->ifcq_sflags);
		} else {
			err = pktsched_setup(ifq, PKTSCHEDT_QFQ,
			    ifq->ifcq_sflags);
		}
		break;

	case IFNET_SCHED_MODEL_FQ_CODEL:
		err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL,
		    ifq->ifcq_sflags);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (err);
}

void
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
{
	IFCQ_LOCK(ifq);
	if (maxqlen == 0)
		maxqlen = if_sndq_maxlen;
	IFCQ_SET_MAXLEN(ifq, maxqlen);
	IFCQ_UNLOCK(ifq);
}

u_int32_t
ifclassq_get_maxlen(struct ifclassq *ifq)
{
	return (IFCQ_MAXLEN(ifq));
}

int
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
{
	int err = 0;

	IFCQ_LOCK(ifq);
	if (sc == MBUF_SC_UNSPEC) {
		VERIFY(packets != NULL);
		*packets = IFCQ_LEN(ifq);
	} else {
		VERIFY(MBUF_VALID_SC(sc));
		VERIFY(packets != NULL && bytes != NULL);
		IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
	}
	IFCQ_UNLOCK(ifq);

	return (err);
}

errno_t
ifclassq_enqueue(struct ifclassq *ifq, struct mbuf *m)
{
	errno_t err;

	IFCQ_LOCK_SPIN(ifq);

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
		ALTQ_ENQUEUE(IFCQ_ALTQ(ifq), m, err);
	} else {
		u_int32_t qlen = IFCQ_LEN(ifq);
		IFCQ_ENQUEUE(ifq, m, err);
		if (IFCQ_LEN(ifq) > qlen)
			ifq->ifcq_drain += (IFCQ_LEN(ifq) - qlen);
	}
#else /* !PF_ALTQ */
	IFCQ_ENQUEUE(ifq, m, err);
#endif /* PF_ALTQ */

	IFCQ_UNLOCK(ifq);

	return (err);
}

errno_t
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit,
    u_int32_t byte_limit, struct mbuf **head,
    struct mbuf **tail, u_int32_t *cnt, u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit,
	    byte_limit, head, tail, cnt, len, FALSE));
}

errno_t
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, struct mbuf **head, struct mbuf **tail,
    u_int32_t *cnt, u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, sc, pkt_limit,
	    CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, head, tail, cnt, len, TRUE));
}

static errno_t
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t pkt_limit, u_int32_t byte_limit, struct mbuf **head,
    struct mbuf **tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int32_t i = 0, l = 0;
	struct mbuf **first, *last;
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
	boolean_t draining;
#endif /* PF_ALTQ */

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

	/*
	 * If the scheduler supports dequeuing multiple packets at
	 * once, use that instead.
	 */
	if (ifq->ifcq_dequeue_multi != NULL) {
		int err;
		IFCQ_LOCK_SPIN(ifq);
		err = ifq->ifcq_dequeue_multi(ifq, CLASSQDQ_REMOVE,
		    pkt_limit, byte_limit, head, tail, cnt, len);
		IFCQ_UNLOCK(ifq);

		if (err == 0 && (*head) == NULL)
			err = EAGAIN;
		return (err);
	}

	*head = NULL;
	first = &(*head);
	last = NULL;

	IFCQ_LOCK_SPIN(ifq);

	while (i < pkt_limit && l < byte_limit) {
#if PF_ALTQ
		u_int32_t qlen;

		qlen = IFCQ_LEN(ifq);
		draining = IFCQ_IS_DRAINING(ifq);

		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else if (draining)
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE_SC(altq, sc, *head);
			else
				*head = NULL;
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else if (draining)
				IFCQ_DEQUEUE(ifq, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE(altq, *head);
			else
				*head = NULL;
		}

		if (draining && *head != NULL) {
			VERIFY(ifq->ifcq_drain >= (qlen - IFCQ_LEN(ifq)));
			ifq->ifcq_drain -= (qlen - IFCQ_LEN(ifq));
		}
#else /* !PF_ALTQ */
		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else
				IFCQ_DEQUEUE(ifq, *head);
		}
#endif /* !PF_ALTQ */

		if (*head == NULL)
			break;

		(*head)->m_nextpkt = NULL;
		last = *head;

		l += (*head)->m_pkthdr.len;

#if MEASURE_BW
		(*head)->m_pkthdr.pkt_bwseq =
		    atomic_add_64_ov(&(ifp->if_bw.cur_seq), m_pktlen(*head));
#endif /* MEASURE_BW */
		if (IFNET_IS_CELLULAR(ifp)) {
			(*head)->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA;
			(*head)->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq);
			(*head)->m_pkthdr.bufstatus_sndbuf =
			    ifp->if_sndbyte_unsent;
		}
		head = &(*head)->m_nextpkt;
		i++;
	}

	IFCQ_UNLOCK(ifq);

	if (tail != NULL)
		*tail = last;
	if (cnt != NULL)
		*cnt = i;
	if (len != NULL)
		*len = l;

	return ((*first != NULL) ? 0 : EAGAIN);
}

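/*
 * Caller sketch (hypothetical, for illustration only; in-tree callers
 * go through the ifnet_dequeue* wrappers rather than calling this
 * directly):
 *
 *	struct mbuf *head, *tail;
 *	u_int32_t cnt, len;
 *	errno_t err;
 *
 *	err = ifclassq_dequeue(&ifp->if_snd, 16, 64 * 1024,
 *	    &head, &tail, &cnt, &len);
 *	if (err == 0) {
 *		... head is an m_nextpkt-linked chain of at most 16
 *		... packets or 64KB; cnt and len report what came out
 *	} else if (err == EAGAIN) {
 *		... queue was empty; wait for the next start callback
 *	}
 */
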
struct mbuf *
ifclassq_poll(struct ifclassq *ifq)
{
	return (ifclassq_poll_common(ifq, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_poll_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
{
	return (ifclassq_poll_common(ifq, sc, TRUE));
}

static struct mbuf *
ifclassq_poll_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt)
{
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
#endif /* PF_ALTQ */
	struct mbuf *m;

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

#if PF_ALTQ
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL_SC(ifq, sc, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL_SC(altq, sc, m);
		else
			m = NULL;
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL(ifq, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL(altq, m);
		else
			m = NULL;
	}
#else /* !PF_ALTQ */
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else
			IFCQ_POLL_SC(ifq, sc, m);
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else
			IFCQ_POLL(ifq, m);
	}
#endif /* !PF_ALTQ */

	return (m);
}

void
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
		ALTQ_UPDATE(IFCQ_ALTQ(ifq), ev);
#endif /* PF_ALTQ */
	IFCQ_UPDATE(ifq, ev);
}

int
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
    ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
    ifclassq_deq_sc_func dequeue_sc, ifclassq_deq_multi_func dequeue_multi,
    ifclassq_req_func request)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(enqueue != NULL);
	VERIFY(!(dequeue != NULL && dequeue_sc != NULL));
	VERIFY(request != NULL);

	ifq->ifcq_type = type;
	ifq->ifcq_disc = discipline;
	ifq->ifcq_enqueue = enqueue;
	ifq->ifcq_dequeue = dequeue;
	ifq->ifcq_dequeue_sc = dequeue_sc;
	ifq->ifcq_dequeue_multi = dequeue_multi;
	ifq->ifcq_request = request;

	return (0);
}

int
ifclassq_detach(struct ifclassq *ifq)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);

	ifq->ifcq_type = PKTSCHEDT_NONE;
	ifq->ifcq_disc = NULL;
	ifq->ifcq_enqueue = NULL;
	ifq->ifcq_dequeue = NULL;
	ifq->ifcq_dequeue_sc = NULL;
	/* also clear the multi-dequeue hook set up by ifclassq_attach() */
	ifq->ifcq_dequeue_multi = NULL;
	ifq->ifcq_request = NULL;

	return (0);
}

int
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
{
	struct if_ifclassq_stats *ifqs;
	int err;

	if (*nbytes < sizeof (*ifqs))
		return (EINVAL);

	ifqs = _MALLOC(sizeof (*ifqs), M_TEMP, M_WAITOK | M_ZERO);
	if (ifqs == NULL)
		return (ENOMEM);

	IFCQ_LOCK(ifq);
	if (!IFCQ_IS_READY(ifq)) {
		IFCQ_UNLOCK(ifq);
		_FREE(ifqs, M_TEMP);
		return (ENXIO);
	}

	ifqs->ifqs_len = IFCQ_LEN(ifq);
	ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
	*(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
	*(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
	ifqs->ifqs_scheduler = ifq->ifcq_type;

	err = pktsched_getqstats(ifq, qid, ifqs);
	IFCQ_UNLOCK(ifq);

	if (err == 0 && (err = copyout((caddr_t)ifqs,
	    (user_addr_t)(uintptr_t)ubuf, sizeof (*ifqs))) == 0)
		*nbytes = sizeof (*ifqs);

	_FREE(ifqs, M_TEMP);

	return (err);
}

const char *
ifclassq_ev2str(cqev_t ev)
{
	const char *c;

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH:
		c = "LINK_BANDWIDTH";
		break;

	case CLASSQ_EV_LINK_LATENCY:
		c = "LINK_LATENCY";
		break;

	case CLASSQ_EV_LINK_MTU:
		c = "LINK_MTU";
		break;

	case CLASSQ_EV_LINK_UP:
		c = "LINK_UP";
		break;

	case CLASSQ_EV_LINK_DOWN:
		c = "LINK_DOWN";
		break;

	default:
		c = "UNKNOWN";
		break;
	}

	return (c);
}

/*
 * Internal representation of token bucket parameters:
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)

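/*
 * Worked example (illustrative only; assumes machclk_freq == 1 GHz):
 * for a 100 Mbps profile, rate / 8 == 12,500,000 bytes/sec, so
 * TBR_SCALE(12500000) / 1000000000 == ~53687091, i.e. 0.0125 bytes
 * per machine-clock tick in 32.32 fixed point.  A 1500-byte packet
 * costs TBR_SCALE(1500) tokens, which refill in about 120 usec.
 */
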
struct mbuf *
ifclassq_tbr_dequeue(struct ifclassq *ifq, int op)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, int op, mbuf_svc_class_t sc)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, sc, TRUE));
}

static struct mbuf *
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
    mbuf_svc_class_t sc, boolean_t drvmgt)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	u_int64_t now;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));
	VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

	tbr = &ifq->ifcq_tbr;
	if (op == CLASSQDQ_REMOVE && tbr->tbr_lastop == CLASSQDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update the token count only when it is non-positive */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime) {
				tbr->tbr_token = tbr->tbr_depth;
			} else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if the token count is still non-positive, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	/*
	 * ifclassq takes precedence over ALTQ queue;
	 * ifcq_drain count is adjusted by the caller.
	 */
#if PF_ALTQ
	if (IFCQ_IS_DRAINING(ifq)) {
#endif /* PF_ALTQ */
		if (op == CLASSQDQ_POLL) {
			if (drvmgt)
				IFCQ_POLL_SC(ifq, sc, m);
			else
				IFCQ_POLL(ifq, m);
		} else {
			if (drvmgt)
				IFCQ_DEQUEUE_SC(ifq, sc, m);
			else
				IFCQ_DEQUEUE(ifq, m);
		}
#if PF_ALTQ
	} else {
		struct ifaltq *altq = IFCQ_ALTQ(ifq);
		if (ALTQ_IS_ENABLED(altq)) {
			if (drvmgt)
				m = (*altq->altq_dequeue_sc)(altq, sc, op);
			else
				m = (*altq->altq_dequeue)(altq, op);
		} else {
			m = NULL;
		}
	}
#endif /* PF_ALTQ */

	if (m != NULL && op == CLASSQDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;

	return (m);
}

/*
 * Set a token bucket regulator.
 * If the specified rate is zero, the token bucket regulator is deleted.
 */
int
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
{
	struct tb_regulator *tbr;
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int64_t rate, old_rate;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

	VERIFY(machclk_freq != 0);

	tbr = &ifq->ifcq_tbr;
	old_rate = tbr->tbr_rate_raw;

	rate = profile->rate;
	if (profile->percent > 0) {
		u_int64_t eff_rate;

		if (profile->percent > 100)
			return (EINVAL);
		if ((eff_rate = ifp->if_output_bw.eff_bw) == 0)
			return (ENODEV);
		rate = (eff_rate * profile->percent) / 100;
	}

	if (rate == 0) {
		if (!IFCQ_TBR_IS_ENABLED(ifq))
			return (ENOENT);

		if (pktsched_verbose)
			printf("%s: TBR disabled\n", if_name(ifp));

		/* disable this TBR */
		ifq->ifcq_flags &= ~IFCQF_TBR;
		bzero(tbr, sizeof (*tbr));
		ifnet_set_start_cycle(ifp, NULL);
		if (update)
			ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
		return (0);
	}

	if (pktsched_verbose) {
		printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
		    (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
		    "enabled", rate, profile->depth);
	}

	/* set the new TBR */
	bzero(tbr, sizeof (*tbr));
	tbr->tbr_rate_raw = rate;
	tbr->tbr_percent = profile->percent;
	ifq->ifcq_flags |= IFCQF_TBR;

	/*
	 * Note that the TBR fill up time (hence the ifnet restart time)
	 * is directly related to the specified TBR depth.  The ideal
	 * depth value should be computed such that the interval time
	 * between each successive wakeup is adequately spaced apart,
	 * in order to reduce scheduling overheads.  A target interval
	 * of 10 ms seems to provide good performance balance.  This can be
	 * overridden by specifying the depth profile.  Values smaller than
	 * the ideal depth will reduce delay at the expense of CPU cycles.
	 */
	tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
	if (tbr->tbr_rate > 0) {
		u_int32_t mtu = ifp->if_mtu;
		int64_t ival, idepth = 0;
		int i;

		if (mtu < IF_MINMTU)
			mtu = IF_MINMTU;

		ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

		for (i = 1; ; i++) {
			idepth = TBR_SCALE(i * mtu);
			if ((idepth / tbr->tbr_rate) > ival)
				break;
		}
		VERIFY(idepth > 0);

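		/*
		 * Illustrative numbers (assuming machclk_freq == 1 GHz):
		 * at rate == 1 Gbps and mtu == 1500, tbr_rate is 0.125
		 * byte per tick (32.32 fixed point) and ival is ~10^7
		 * ticks, so the loop exits near i == 834; idepth is then
		 * about TBR_SCALE(1.25 MB), roughly the bytes sent in
		 * one 10 ms interval.
		 */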
		tbr->tbr_depth = TBR_SCALE(profile->depth);
		if (tbr->tbr_depth == 0) {
			tbr->tbr_filluptime = idepth / tbr->tbr_rate;
			/* a little fudge factor to get closer to rate */
			tbr->tbr_depth = idepth + (idepth >> 3);
		} else {
			tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
		}
	} else {
		tbr->tbr_depth = TBR_SCALE(profile->depth);
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	}
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = CLASSQDQ_REMOVE;

	if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
		struct timespec ts =
		    { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
		if (pktsched_verbose) {
			printf("%s: TBR calculated tokens %lld "
			    "filluptime %llu ns\n", if_name(ifp),
			    TBR_UNSCALE(tbr->tbr_token),
			    pktsched_abs_to_nsecs(tbr->tbr_filluptime));
		}
		ifnet_set_start_cycle(ifp, &ts);
	} else {
		if (pktsched_verbose) {
			if (tbr->tbr_rate == 0) {
				printf("%s: TBR calculated tokens %lld "
				    "infinite filluptime\n", if_name(ifp),
				    TBR_UNSCALE(tbr->tbr_token));
			} else if (!(ifp->if_flags & IFF_UP)) {
				printf("%s: TBR suspended (link is down)\n",
				    if_name(ifp));
			}
		}
		ifnet_set_start_cycle(ifp, NULL);
	}
	if (update && tbr->tbr_rate_raw != old_rate)
		ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);

	return (0);
}

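/*
 * Caller sketch (hypothetical, for illustration only): cap an interface
 * at 50 Mbps and let the code above derive the depth; the ifclassq lock
 * must be held across the call:
 *
 *	struct tb_profile tb = { .rate = 50 * 1000 * 1000,
 *	    .percent = 0, .depth = 0 };
 *	int err;
 *
 *	IFCQ_LOCK(&ifp->if_snd);
 *	err = ifclassq_tbr_set(&ifp->if_snd, &tb, TRUE);
 *	IFCQ_UNLOCK(&ifp->if_snd);
 */
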
void
ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay)
{
	u_int64_t target_qdelay = 0;
	target_qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

	if (ifclassq_target_qdelay != 0)
		target_qdelay = ifclassq_target_qdelay;

	/*
	 * If we do not know the effective bandwidth, use the default
	 * target queue delay.
	 */
	if (target_qdelay == 0)
		target_qdelay = IFQ_TARGET_DELAY;

	/*
	 * If a delay has been added to the ifnet start callback for
	 * coalescing, we have to add that to the pre-set target delay
	 * because the packets can be in the queue longer.
	 */
	if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
	    ifp->if_start_delay_timeout > 0)
		target_qdelay += ifp->if_start_delay_timeout;

	*(if_target_qdelay) = target_qdelay;
}

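/*
 * Note (illustrative; not part of the original source): the overrides
 * consulted here and in ifclassq_calc_update_interval() below are the
 * net.classq sysctls declared at the top of this file, settable from
 * userland, e.g.:
 *
 *	sysctl net.classq.target_qdelay=10000000      (10 ms, in ns)
 *	sysctl net.classq.update_interval=100000000   (100 ms, in ns)
 */
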
void
ifclassq_calc_update_interval(u_int64_t *update_interval)
{
	u_int64_t uint = 0;

	/* if the system-level override is set, use it */
	if (ifclassq_update_interval != 0)
		uint = ifclassq_update_interval;

	/* otherwise use the default value */
	if (uint == 0)
		uint = IFQ_UPDATE_INTERVAL;

	*update_interval = uint;
}