/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/dlil.h>
#include <net/flowadv.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif

#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
#include <dev/random/randomdev.h>

/*
 * Stochastic Fair Blue
 *
 * Wu-chang Feng, Dilip D. Kandlur, Debanjan Saha, Kang G. Shin
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 * Based on the NS code with the following parameters:
 *
 *     bytes:     false
 *     decrement: 0.001
 *     increment: 0.005
 *     hold-time: 10ms-50ms (randomized)
 *     algorithm: 0
 *     pbox:      1
 *     pbox-time: 50-100ms (randomized)
 *     hinterval: 11-23 (randomized)
 *
 * This implementation uses L = 2 and N = 32 for 2 sets of:
 *
 *     B[L][N]: L x N array of bins (L levels, N bins per level)
 *
 * Each set effectively creates 32^2 virtual buckets (bin combinations)
 * while using only O(32*2) states.
 *
 * Given a 32-bit hash value, we divide it such that octets [0,1,2,3] are
 * used as index for the bins across the 2 levels, where level 1 uses [0,2]
 * and level 2 uses [1,3]. The 2 values per level correspond to the indices
 * for the current and warm-up sets. (Section 4.4 of the SFB paper, on
 * Moving Hash Functions, explains the purpose of these 2 sets.)
 */
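/*
 * Worked example of the indexing above (illustrative only): the 32-bit
 * scratch value carries one 16-bit hash per set, i.e. octets [0,1] for
 * set 0 and octets [2,3] for set 1.  With current set s:
 *
 *     level 0 bin = SFB_BINMASK(pkt_sfb_hash8[(s << 1)])
 *     level 1 bin = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1])
 *
 * e.g. if set 0's 16-bit hash has octets 0xDD and 0xCC, the packet maps
 * to bin (0xDD & 0x1f) = 29 at one level and (0xCC & 0x1f) = 12 at the
 * other (N = 32 bins per level).
 */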

/*
 * Use Murmur3A_x86_32 for hash function. It seems to perform consistently
 * across platforms for 1-word key (32-bit flowhash value). See flowhash.h
 * for other alternatives. We only need 16-bit hash output.
 */
#define SFB_HASH net_flowhash_mh3_x86_32
#define SFB_HASHMASK HASHMASK(16)

#define SFB_BINMASK(_x) \
        ((_x) & HASHMASK(SFB_BINS_SHIFT))

#define SFB_BINST(_sp, _l, _n, _c) \
        (&(*(_sp)->sfb_bins)[_c].stats[_l][_n])

#define SFB_BINFT(_sp, _l, _n, _c) \
        (&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n])

#define SFB_FC_LIST(_sp, _n) \
        (&(*(_sp)->sfb_fc_lists)[_n])

/*
 * The holdtime parameter determines the minimum time interval between
 * two successive updates of the marking probability. In the event the
 * uplink speed is not known, a default value is chosen and is randomized
 * to be within the following range.
 */
#define HOLDTIME_BASE (100ULL * 1000 * 1000)  /* 100ms */
#define HOLDTIME_MIN (10ULL * 1000 * 1000)  /* 10ms */
#define HOLDTIME_MAX (100ULL * 1000 * 1000)  /* 100ms */

/*
 * The pboxtime parameter determines the bandwidth allocated for rogue
 * flows, i.e. the rate limiting bandwidth. In the event the uplink speed
 * is not known, a default value is chosen and is randomized to be within
 * the following range.
 */
#define PBOXTIME_BASE (300ULL * 1000 * 1000)  /* 300ms */
#define PBOXTIME_MIN (30ULL * 1000 * 1000)  /* 30ms */
#define PBOXTIME_MAX (300ULL * 1000 * 1000)  /* 300ms */

/*
 * Target queueing delay is the amount of extra delay that can be added
 * to accommodate variations in the link bandwidth. The queue should be
 * large enough to induce this much delay and nothing more than that.
 */
#define TARGET_QDELAY_BASE (10ULL * 1000 * 1000)  /* 10ms */
#define TARGET_QDELAY_MIN (10ULL * 1000)  /* 10us */
#define TARGET_QDELAY_MAX (20ULL * 1000 * 1000 * 1000)  /* 20s */

/*
 * Update interval for checking the extra delay added by the queue. This
 * should be the 90th-95th percentile of the RTT experienced by TCP
 * connections, so that it can absorb bursts of traffic.
 */
#define UPDATE_INTERVAL_BASE (100ULL * 1000 * 1000)  /* 100ms */
#define UPDATE_INTERVAL_MIN (100ULL * 1000 * 1000)  /* 100ms */
#define UPDATE_INTERVAL_MAX (10ULL * 1000 * 1000 * 1000)  /* 10s */

#define SFB_RANDOM(sp, tmin, tmax) \
        ((sfb_random(sp) % ((tmax) - (tmin) + 1)) + (tmin))
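/*
 * For example (illustrative arithmetic): SFB_RANDOM(sp, HOLDTIME_MIN,
 * HOLDTIME_MAX) reduces a random value modulo the width of the
 * [10ms, 100ms] range and adds the minimum, so the result always lands
 * within the documented range.
 */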

#define SFB_PKT_PBOX 0x1  /* in penalty box */

/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define SFB_MAX_PMARK (1 << SFB_FP_SHIFT)  /* Q14 representation of 1.00 */

/*
 * These are the d1 (increment) and d2 (decrement) parameters, which
 * determine the amount by which the marking probability is incremented
 * when the queue overflows, or decremented when the link is idle. d1 is
 * set higher than d2 because link underutilization can occur when
 * congestion management is either too conservative or too aggressive,
 * but packet loss occurs only when congestion management is too
 * conservative. By weighting more heavily against packet loss, the
 * algorithm can respond quickly to a substantial increase in traffic
 * load.
 */
#define SFB_INCREMENT 82  /* Q14 representation of 0.005 */
#define SFB_DECREMENT 16  /* Q14 representation of 0.001 */

#define SFB_PMARK_TH 16056  /* Q14 representation of 0.98 */
#define SFB_PMARK_WARM 3276  /* Q14 representation of 0.2 */
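/*
 * Worked Q14 arithmetic for the values above (a fraction v is stored as
 * v * 2^14, truncated or rounded):
 *
 *     1.00  * 16384 = 16384    -> 16384 (SFB_MAX_PMARK)
 *     0.98  * 16384 = 16056.32 -> 16056 (SFB_PMARK_TH)
 *     0.20  * 16384 = 3276.8   -> 3276  (SFB_PMARK_WARM)
 *     0.005 * 16384 = 81.92    -> 82    (SFB_INCREMENT)
 *     0.001 * 16384 = 16.384   -> 16    (SFB_DECREMENT)
 */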

#define SFB_PMARK_INC(_bin) do { \
        (_bin)->pmark += sfb_increment; \
        if ((_bin)->pmark > SFB_MAX_PMARK) \
                (_bin)->pmark = SFB_MAX_PMARK; \
} while (0)

#define SFB_PMARK_DEC(_bin) do { \
        if ((_bin)->pmark > 0) { \
                (_bin)->pmark -= sfb_decrement; \
                if ((_bin)->pmark < 0) \
                        (_bin)->pmark = 0; \
        } \
} while (0)

/* Minimum number of bytes in queue to get flow controlled */
#define SFB_MIN_FC_THRESHOLD_BYTES 7500

#define SFB_SET_DELAY_HIGH(_sp_, _q_) do { \
        (_sp_)->sfb_flags |= SFBF_DELAYHIGH; \
        (_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \
            (qsize((_q_)) >> 3)); \
} while (0)
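/*
 * Illustrative arithmetic: for a queue currently holding 200 KB,
 * qsize() >> 3 is 25600 bytes, so the flow-control threshold becomes
 * max(7500, 25600) = 25600; for a 32 KB queue, 32768 >> 3 = 4096 falls
 * below the floor and the threshold stays at 7500 bytes.
 */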

#define SFB_QUEUE_DELAYBASED(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYBASED)
#define SFB_IS_DELAYHIGH(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYHIGH)
#define SFB_QUEUE_DELAYBASED_MAXSIZE 2048  /* max pkts */

#define HINTERVAL_MIN (10)  /* 10 seconds */
#define HINTERVAL_MAX (20)  /* 20 seconds */
#define SFB_HINTERVAL(sp) \
        ((sfb_random(sp) % (HINTERVAL_MAX - HINTERVAL_MIN + 1)) + HINTERVAL_MIN)

#define DEQUEUE_DECAY 7  /* ilog2 of EWMA decay rate, (128) */
#define DEQUEUE_SPIKE(_new, _old) \
        ((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11))

#define ABS(v) (((v) > 0) ? (v) : -(v))
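/*
 * Restated (illustrative): with DEQUEUE_DECAY = 7 the dequeue-interval
 * EWMA maintained in sfb_getq_flow() is
 *
 *     avg' = ((avg << 7) - avg + new) >> 7,
 *
 * i.e. avg * (127/128) + new * (1/128), and DEQUEUE_SPIKE() flags a new
 * sample more than 2048x (1 << 11) the current average, in which case
 * the decay is deepened (decay += 5) so a one-off stall does not swamp
 * the average.
 */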

#define SFB_ZONE_MAX 32  /* maximum elements in zone */
#define SFB_ZONE_NAME "classq_sfb"  /* zone name */

#define SFB_BINS_ZONE_MAX 32  /* maximum elements in zone */
#define SFB_BINS_ZONE_NAME "classq_sfb_bins"  /* zone name */

#define SFB_FCL_ZONE_MAX 32  /* maximum elements in zone */
#define SFB_FCL_ZONE_NAME "classq_sfb_fcl"  /* zone name */

/* Place the flow control entries in current bin on level 0 */
#define SFB_FC_LEVEL 0

/* Store SFB hash and flags in the module private scratch space */
#define pkt_sfb_hash8 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val8
#define pkt_sfb_hash16 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16
#define pkt_sfb_hash32 pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32
#define pkt_sfb_flags pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32
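/*
 * These four aliases overlay the same module-private scratch space: the
 * first 32-bit word (pkt_sfb_hash32) holds one 16-bit hash per set,
 * viewed either as pkt_sfb_hash16[0..1] (set 0, set 1) or as the four
 * per-level octets pkt_sfb_hash8[0..3]; the second word holds
 * pkt_sfb_flags.  (Descriptive restatement of the defines above.)
 */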

static unsigned int sfb_size;  /* size of zone element */
static struct zone *sfb_zone;  /* zone for sfb */

static unsigned int sfb_bins_size;  /* size of zone element */
static struct zone *sfb_bins_zone;  /* zone for sfb_bins */

static unsigned int sfb_fcl_size;  /* size of zone element */
static struct zone *sfb_fcl_zone;  /* zone for sfb_fc_lists */

/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static struct mbuf *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t,
    boolean_t);
static void sfb_resetq(struct sfb *, cqev_t);
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
static void sfb_calc_target_qdelay(struct sfb *, u_int64_t);
static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
static inline int sfb_pcheck(struct sfb *, struct pkthdr *);
static int sfb_penalize(struct sfb *, struct pkthdr *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static inline void sfb_dq_update_bins(struct sfb *, struct pkthdr *,
    struct timespec *, u_int32_t qsize);
static inline void sfb_eq_update_bins(struct sfb *, struct pkthdr *);
static int sfb_drop_early(struct sfb *, struct pkthdr *, u_int16_t *,
    struct timespec *);
static boolean_t sfb_bin_addfcentry(struct sfb *, struct pkthdr *);
static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
    struct timespec *);

SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "SFB");

static u_int64_t sfb_holdtime = 0;  /* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_holdtime, "SFB freeze time in nanoseconds");

static u_int64_t sfb_pboxtime = 0;  /* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_pboxtime, "SFB penalty box time in nanoseconds");

static u_int64_t sfb_hinterval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_hinterval, "SFB hash interval in nanoseconds");

static u_int64_t sfb_target_qdelay = 0;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_target_qdelay, "SFB target queue delay in nanoseconds");

static u_int64_t sfb_update_interval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, update_interval,
    CTLFLAG_RW|CTLFLAG_LOCKED, &sfb_update_interval, "SFB update interval");

static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_increment, SFB_INCREMENT, "SFB increment [d1]");

static u_int32_t sfb_decrement = SFB_DECREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]");

static u_int32_t sfb_allocation = 0;  /* 0 means "automatic" */
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_allocation, 0, "SFB bin allocation");

static u_int32_t sfb_ratelimit = 0;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_ratelimit, 0, "SFB rate limit");

#define KBPS (1ULL * 1000)  /* 1 Kbits per second */
#define MBPS (1ULL * 1000 * 1000)  /* 1 Mbits per second */
#define GBPS (MBPS * 1000)  /* 1 Gbits per second */

struct sfb_time_tbl {
        u_int64_t speed;  /* uplink speed */
        u_int64_t holdtime;  /* hold time */
        u_int64_t pboxtime;  /* penalty box time */
};

static struct sfb_time_tbl sfb_ttbl[] = {
        { 1 * MBPS, HOLDTIME_BASE * 1000, PBOXTIME_BASE * 1000 },
        { 10 * MBPS, HOLDTIME_BASE * 100, PBOXTIME_BASE * 100 },
        { 100 * MBPS, HOLDTIME_BASE * 10, PBOXTIME_BASE * 10 },
        { 1 * GBPS, HOLDTIME_BASE, PBOXTIME_BASE },
        { 10 * GBPS, HOLDTIME_BASE / 10, PBOXTIME_BASE / 10 },
        { 100 * GBPS, HOLDTIME_BASE / 100, PBOXTIME_BASE / 100 },
        { 0, 0, 0 }
};
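/*
 * The lookups in sfb_calc_holdtime() and sfb_calc_pboxtime() pick the
 * entry with the largest speed not exceeding the link rate.  Worked
 * example (illustrative): a 50 Mbps link falls between the 10 Mbps and
 * 100 Mbps rows, so it uses the 10 Mbps row, i.e. holdtime =
 * HOLDTIME_BASE * 100 and pboxtime = PBOXTIME_BASE * 100; rates at or
 * above 100 Gbps use the last non-sentinel row.
 */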

void
sfb_init(void)
{
        _CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
        _CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

        sfb_size = sizeof (struct sfb);
        sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
            0, SFB_ZONE_NAME);
        if (sfb_zone == NULL) {
                panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(sfb_zone, Z_EXPAND, TRUE);
        zone_change(sfb_zone, Z_CALLERACCT, TRUE);

        sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
        sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
            0, SFB_BINS_ZONE_NAME);
        if (sfb_bins_zone == NULL) {
                panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
        zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

        sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
        sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
            0, SFB_FCL_ZONE_NAME);
        if (sfb_fcl_zone == NULL) {
                panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
                /* NOTREACHED */
        }
        zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
        zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}

static u_int32_t
sfb_random(struct sfb *sp)
{
        IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
        return (RandomULong());
}

static void
sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw)
{
        u_int64_t holdtime;

        if (sfb_holdtime != 0) {
                holdtime = sfb_holdtime;
        } else if (outbw == 0) {
                holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
        } else {
                unsigned int n, i;

                n = sfb_ttbl[0].holdtime;
                for (i = 0; sfb_ttbl[i].speed != 0; i++) {
                        if (outbw < sfb_ttbl[i].speed)
                                break;
                        n = sfb_ttbl[i].holdtime;
                }
                holdtime = n;
        }
        net_nsectimer(&holdtime, &sp->sfb_holdtime);
}

static void
sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw)
{
        u_int64_t pboxtime;

        if (sfb_pboxtime != 0) {
                pboxtime = sfb_pboxtime;
        } else if (outbw == 0) {
                pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
        } else {
                unsigned int n, i;

                n = sfb_ttbl[0].pboxtime;
                for (i = 0; sfb_ttbl[i].speed != 0; i++) {
                        if (outbw < sfb_ttbl[i].speed)
                                break;
                        n = sfb_ttbl[i].pboxtime;
                }
                pboxtime = n;
        }
        net_nsectimer(&pboxtime, &sp->sfb_pboxtime);
        net_timerclear(&sp->sfb_pboxfreeze);
}

static void
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
{
        u_int64_t hinterval;
        struct timespec now;

        if (t != NULL) {
                /*
                 * TODO adi@apple.com: use dq_avg to derive hinterval.
                 */
                hinterval = *t;
        }

        if (sfb_hinterval != 0)
                hinterval = sfb_hinterval;
        else if (t == NULL || hinterval == 0)
                hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);

        net_nsectimer(&hinterval, &sp->sfb_hinterval);

        nanouptime(&now);
        net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}

static void
sfb_calc_target_qdelay(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
        u_int64_t target_qdelay = 0;
        struct ifnet *ifp = sp->sfb_ifp;

        target_qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

        if (sfb_target_qdelay != 0)
                target_qdelay = sfb_target_qdelay;

        /*
         * If we do not know the effective bandwidth, use the default
         * target queue delay.
         */
        if (target_qdelay == 0)
                target_qdelay = IFQ_TARGET_DELAY;

        /*
         * If a delay has been added to ifnet start callback for
         * coalescing, we have to add that to the pre-set target delay
         * because the packets can be in the queue longer.
         */
        if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
            ifp->if_start_delay_timeout > 0)
                target_qdelay += ifp->if_start_delay_timeout;

        sp->sfb_target_qdelay = target_qdelay;
}

static void
sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
        u_int64_t update_interval = 0;

        /* If the system-level override is set, use it */
        if (sfb_update_interval != 0)
                update_interval = sfb_update_interval;
        /*
         * If we do not know the effective bandwidth, use the default
         * update interval.
         */
        if (update_interval == 0)
                update_interval = IFQ_UPDATE_INTERVAL;

        net_nsectimer(&update_interval, &sp->sfb_update_interval);
}

/*
 * sfb support routines
 */
struct sfb *
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
        struct sfb *sp;
        int i;

        VERIFY(ifp != NULL && qlim > 0);

        sp = zalloc(sfb_zone);
        if (sp == NULL) {
                log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
                return (NULL);
        }
        bzero(sp, sfb_size);

        if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
                log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
                sfb_destroy(sp);
                return (NULL);
        }
        bzero(sp->sfb_bins, sfb_bins_size);

        if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
                log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
                    if_name(ifp));
                sfb_destroy(sp);
                return (NULL);
        }
        bzero(sp->sfb_fc_lists, sfb_fcl_size);

        for (i = 0; i < SFB_BINS; ++i)
                STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);

        sp->sfb_ifp = ifp;
        sp->sfb_qlim = qlim;
        sp->sfb_qid = qid;
        sp->sfb_flags = (flags & SFBF_USERFLAGS);
#if !PF_ECN
        if (sp->sfb_flags & SFBF_ECN) {
                sp->sfb_flags &= ~SFBF_ECN;
                log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
                    "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);
        }
#endif /* !PF_ECN */

        sfb_resetq(sp, -1);

        return (sp);
}

static void
sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
{
        IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);

        VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
        sp->sfb_stats.flow_feedback += fcl->cnt;
        fcl->cnt = 0;

        flowadv_add(&fcl->fclist);
        VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
}

static void
sfb_fclists_clean(struct sfb *sp)
{
        int i;

        /* Move all the flow control entries to the flowadv list */
        for (i = 0; i < SFB_BINS; ++i) {
                struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
                if (!STAILQ_EMPTY(&fcl->fclist))
                        sfb_fclist_append(sp, fcl);
        }
}

void
sfb_destroy(struct sfb *sp)
{
        sfb_fclists_clean(sp);
        if (sp->sfb_bins != NULL) {
                zfree(sfb_bins_zone, sp->sfb_bins);
                sp->sfb_bins = NULL;
        }
        if (sp->sfb_fc_lists != NULL) {
                zfree(sfb_fcl_zone, sp->sfb_fc_lists);
                sp->sfb_fc_lists = NULL;
        }
        zfree(sfb_zone, sp);
}

static void
sfb_resetq(struct sfb *sp, cqev_t ev)
{
        struct ifnet *ifp = sp->sfb_ifp;
        u_int64_t eff_rate;

        VERIFY(ifp != NULL);

        if (ev != CLASSQ_EV_LINK_DOWN) {
                (*sp->sfb_bins)[0].fudge = sfb_random(sp);
                (*sp->sfb_bins)[1].fudge = sfb_random(sp);
                sp->sfb_allocation = ((sfb_allocation == 0) ?
                    (sp->sfb_qlim / 3) : sfb_allocation);
                sp->sfb_drop_thresh = sp->sfb_allocation +
                    (sp->sfb_allocation >> 1);
        }

        sp->sfb_clearpkts = 0;
        sp->sfb_current = 0;

        eff_rate = ifnet_output_linkrate(ifp);
        sp->sfb_eff_rate = eff_rate;

        sfb_calc_holdtime(sp, eff_rate);
        sfb_calc_pboxtime(sp, eff_rate);
        sfb_calc_hinterval(sp, NULL);
        sfb_calc_target_qdelay(sp, eff_rate);
        sfb_calc_update_interval(sp, eff_rate);

        if (ev == CLASSQ_EV_LINK_DOWN ||
            ev == CLASSQ_EV_LINK_UP)
                sfb_fclists_clean(sp);

        bzero(sp->sfb_bins, sizeof (*sp->sfb_bins));
        bzero(&sp->sfb_stats, sizeof (sp->sfb_stats));

        if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose)
                return;

        log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
            "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
627 "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps"
628 "target_qdelay= %llu nsec "
629 "update_interval=%llu sec %llu nsec flags=0x%x\n",
630 if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
631 (u_int64_t)sp->sfb_pboxtime.tv_nsec,
632 (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
633 (int)sp->sfb_hinterval.tv_sec, (int)sizeof (*sp->sfb_bins),
634 eff_rate, (u_int64_t)sp->sfb_target_qdelay,
635 (u_int64_t)sp->sfb_update_interval.tv_sec,
636 (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
637 }
638
639 void
640 sfb_getstats(struct sfb *sp, struct sfb_stats *sps)
641 {
642 sps->allocation = sp->sfb_allocation;
643 sps->dropthresh = sp->sfb_drop_thresh;
644 sps->clearpkts = sp->sfb_clearpkts;
645 sps->current = sp->sfb_current;
646 sps->target_qdelay = sp->sfb_target_qdelay;
647 sps->min_estdelay = sp->sfb_min_qdelay;
648 sps->delay_fcthreshold = sp->sfb_fc_threshold;
649 sps->flags = sp->sfb_flags;
650
651 net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
652 net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
653 net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
654 net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
655 *(&(sps->sfbstats)) = *(&(sp->sfb_stats));
656
657 _CASSERT(sizeof ((*sp->sfb_bins)[0].stats) ==
658 sizeof (sps->binstats[0].stats));
659
660 bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats,
661 sizeof (sps->binstats[0].stats));
662 bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats,
663 sizeof (sps->binstats[1].stats));
664 }
665
666 static void
667 sfb_swap_bins(struct sfb *sp, u_int32_t len)
668 {
669 int i, j, s;
670
671 if (sp->sfb_flags & SFBF_SUSPENDED)
672 return;
673
674 s = sp->sfb_current;
675 VERIFY((s + (s ^ 1)) == 1);
676
677 (*sp->sfb_bins)[s].fudge = sfb_random(sp); /* recompute perturbation */
678 sp->sfb_clearpkts = len;
679 sp->sfb_stats.num_rehash++;
680
681 s = (sp->sfb_current ^= 1); /* flip the bit (swap current) */
682
683 if (classq_verbose) {
684 log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, "
685 "qlen=%d\n", if_name(sp->sfb_ifp), sp->sfb_qid, s, len);
686 }
687
688 /* clear freezetime for all current bins */
689 bzero(&(*sp->sfb_bins)[s].freezetime,
690 sizeof ((*sp->sfb_bins)[s].freezetime));
691
692 /* clear/adjust bin statistics and flow control lists */
693 for (i = 0; i < SFB_BINS; i++) {
694 struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
695
696 if (!STAILQ_EMPTY(&fcl->fclist))
697 sfb_fclist_append(sp, fcl);
698
699 for (j = 0; j < SFB_LEVELS; j++) {
700 struct sfbbinstats *cbin, *wbin;
701
702 cbin = SFB_BINST(sp, j, i, s); /* current */
703 wbin = SFB_BINST(sp, j, i, s ^ 1); /* warm-up */
704
705 cbin->pkts = 0;
706 cbin->bytes = 0;
707 if (cbin->pmark > SFB_MAX_PMARK)
708 cbin->pmark = SFB_MAX_PMARK;
709 if (cbin->pmark < 0)
710 cbin->pmark = 0;
711
                        /*
                         * Keep the pmark from before, to identify
                         * non-responsive flows immediately.
                         */
                        if (wbin->pmark > SFB_PMARK_WARM)
                                wbin->pmark = SFB_PMARK_WARM;
                }
        }
}

static inline int
sfb_pcheck(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
        int i, n;
#endif /* SFB_LEVELS != 2 */
        int s;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        /*
         * For current bins, returns 1 if all pmark >= SFB_PMARK_TH,
         * 0 otherwise; optimize for SFB_LEVELS=2.
         */
#if SFB_LEVELS == 2
        /*
         * Level 0: bin index at [0] for set 0; [2] for set 1
         * Level 1: bin index at [1] for set 0; [3] for set 1
         */
        if (SFB_BINST(sp, 0, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]),
            s)->pmark < SFB_PMARK_TH ||
            SFB_BINST(sp, 1, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]),
            s)->pmark < SFB_PMARK_TH)
                return (0);
#else /* SFB_LEVELS != 2 */
        for (i = 0; i < SFB_LEVELS; i++) {
                if (s == 0) /* set 0, bin index [0,1] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
                else /* set 1, bin index [2,3] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

                if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH)
                        return (0);
        }
#endif /* SFB_LEVELS != 2 */
        return (1);
}

static int
sfb_penalize(struct sfb *sp, struct pkthdr *pkt, struct timespec *now)
{
        struct timespec delta = { 0, 0 };

        /* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
        if (!sfb_ratelimit || !sfb_pcheck(sp, pkt))
                return (0);

        net_timersub(now, &sp->sfb_pboxfreeze, &delta);
        if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) {
#if SFB_LEVELS != 2
                int i;
#endif /* SFB_LEVELS != 2 */
                struct sfbbinstats *bin;
                int n, w;

                w = sp->sfb_current ^ 1;
                VERIFY((w + (w ^ 1)) == 1);

                /*
                 * Update warm-up bins; optimize for SFB_LEVELS=2
                 */
#if SFB_LEVELS == 2
                /* Level 0: bin index at [0] for set 0; [2] for set 1 */
                n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1)]);
                bin = SFB_BINST(sp, 0, n, w);
                if (bin->pkts >= sp->sfb_allocation)
                        sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);

                /* Level 1: bin index at [1] for set 0; [3] for set 1 */
                n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1) + 1]);
                bin = SFB_BINST(sp, 1, n, w);
                if (bin->pkts >= sp->sfb_allocation)
                        sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
#else /* SFB_LEVELS != 2 */
                for (i = 0; i < SFB_LEVELS; i++) {
                        if (w == 0) /* set 0, bin index [0,1] */
                                n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
                        else /* set 1, bin index [2,3] */
                                n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

                        bin = SFB_BINST(sp, i, n, w);
                        if (bin->pkts >= sp->sfb_allocation) {
                                sfb_increment_bin(sp, bin,
                                    SFB_BINFT(sp, i, n, w), now);
                        }
                }
#endif /* SFB_LEVELS != 2 */
                return (1);
        }

        /* non-conformant or else misclassified flow; queue it anyway */
        pkt->pkt_sfb_flags |= SFB_PKT_PBOX;
        *(&sp->sfb_pboxfreeze) = *now;

        return (0);
}

static void
sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now, boolean_t inc)
{
        struct timespec delta;

        net_timersub(now, ft, &delta);
        if (net_timercmp(&delta, &sp->sfb_holdtime, <)) {
                if (classq_verbose > 1) {
                        log(LOG_DEBUG, "%s: SFB qid=%d, %s update frozen "
                            "(delta=%llu nsec)\n", if_name(sp->sfb_ifp),
                            sp->sfb_qid, inc ? "increment" : "decrement",
                            (u_int64_t)delta.tv_nsec);
                }
                return;
        }

        /* increment/decrement marking probability */
        *ft = *now;
        if (inc)
                SFB_PMARK_INC(bin);
        else
                SFB_PMARK_DEC(bin);
}

static void
sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
        return (sfb_adjust_bin(sp, bin, ft, now, FALSE));
}

static void
sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
        return (sfb_adjust_bin(sp, bin, ft, now, TRUE));
}

static inline void
sfb_dq_update_bins(struct sfb *sp, struct pkthdr *pkt,
    struct timespec *now, u_int32_t qsize)
{
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
        int i;
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
        struct sfbbinstats *bin;
        int s, n;
        struct sfb_fcl *fcl = NULL;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        /*
         * Update current bins; optimize for SFB_LEVELS=2 and SFB_FC_LEVEL=0
         */
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
        /* Level 0: bin index at [0] for set 0; [2] for set 1 */
        n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
        bin = SFB_BINST(sp, 0, n, s);

        VERIFY(bin->pkts > 0 && bin->bytes >= (u_int32_t)pkt->len);
        bin->pkts--;
        bin->bytes -= pkt->len;

        if (bin->pkts == 0)
                sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

        /* Deliver flow control feedback to the sockets */
        if (SFB_QUEUE_DELAYBASED(sp)) {
                if (!(SFB_IS_DELAYHIGH(sp)) ||
                    bin->bytes <= sp->sfb_fc_threshold ||
                    bin->pkts == 0 || qsize == 0)
                        fcl = SFB_FC_LIST(sp, n);
        } else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
                fcl = SFB_FC_LIST(sp, n);
        }
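        /*
         * Illustrative numbers: with qlim 1024 and automatic allocation,
         * sfb_allocation = 1024 / 3 = 341 packets, so in the packet-based
         * case feedback is issued once this bin drains to 341 >> 2 = 85
         * packets or fewer.
         */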

        if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
                sfb_fclist_append(sp, fcl);
        fcl = NULL;

        /* Level 1: bin index at [1] for set 0; [3] for set 1 */
        n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
        bin = SFB_BINST(sp, 1, n, s);

        VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt->len);
        bin->pkts--;
        bin->bytes -= pkt->len;
        if (bin->pkts == 0)
                sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
        for (i = 0; i < SFB_LEVELS; i++) {
                if (s == 0) /* set 0, bin index [0,1] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
                else /* set 1, bin index [2,3] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

                bin = SFB_BINST(sp, i, n, s);

                VERIFY(bin->pkts > 0 && bin->bytes >= pkt->len);
                bin->pkts--;
                bin->bytes -= pkt->len;
                if (bin->pkts == 0)
                        sfb_decrement_bin(sp, bin,
                            SFB_BINFT(sp, i, n, s), now);
                if (i != SFB_FC_LEVEL)
                        continue;
                if (SFB_QUEUE_DELAYBASED(sp)) {
                        if (!(SFB_IS_DELAYHIGH(sp)) ||
                            bin->bytes <= sp->sfb_fc_threshold)
                                fcl = SFB_FC_LIST(sp, n);
                } else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
                        fcl = SFB_FC_LIST(sp, n);
                }
                if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
                        sfb_fclist_append(sp, fcl);
                fcl = NULL;
        }
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
}

static inline void
sfb_eq_update_bins(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
        int i, n;
#endif /* SFB_LEVELS != 2 */
        int s;
        struct sfbbinstats *bin;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        /*
         * Update current bins; optimize for SFB_LEVELS=2
         */
#if SFB_LEVELS == 2
        /* Level 0: bin index at [0] for set 0; [2] for set 1 */
        bin = SFB_BINST(sp, 0,
            SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]), s);
        bin->pkts++;
        bin->bytes += pkt->len;

        /* Level 1: bin index at [1] for set 0; [3] for set 1 */
        bin = SFB_BINST(sp, 1,
            SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]), s);
        bin->pkts++;
        bin->bytes += pkt->len;

#else /* SFB_LEVELS != 2 */
        for (i = 0; i < SFB_LEVELS; i++) {
                if (s == 0) /* set 0, bin index [0,1] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
                else /* set 1, bin index [2,3] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

                bin = SFB_BINST(sp, i, n, s);
                bin->pkts++;
                bin->bytes += pkt->len;
        }
#endif /* SFB_LEVELS != 2 */
}

static boolean_t
sfb_bin_addfcentry(struct sfb *sp, struct pkthdr *pkt)
{
        struct flowadv_fcentry *fce;
        u_int32_t flowsrc, flowid;
        struct sfb_fcl *fcl;
        int s;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        flowsrc = pkt->pkt_flowsrc;
        flowid = pkt->pkt_flowid;

        if (flowid == 0) {
                sp->sfb_stats.null_flowid++;
                return (FALSE);
        }

        /*
         * Use value at index 0 for set 0 and
         * value at index 2 for set 1
         */
        fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]));
        STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
                if (fce->fce_flowsrc == flowsrc &&
                    fce->fce_flowid == flowid) {
                        /* Already on flow control list; just return */
                        return (TRUE);
                }
        }

        IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
        fce = flowadv_alloc_entry(M_WAITOK);
        if (fce != NULL) {
                fce->fce_flowsrc = flowsrc;
                fce->fce_flowid = flowid;
                STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
                fcl->cnt++;
                sp->sfb_stats.flow_controlled++;
        }

        return (fce != NULL);
}

/*
 * check if this flow needs to be flow-controlled or if this
 * packet needs to be dropped.
 */
static int
sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
{
        int ret = 0;
        if (SFB_QUEUE_DELAYBASED(sp)) {
                /*
                 * Mark or drop if this bin has more
                 * bytes than the flowcontrol threshold.
                 */
                if (SFB_IS_DELAYHIGH(sp) &&
                    bin->bytes >= (sp->sfb_fc_threshold << 1))
                        ret = 1;
        } else {
                if (bin->pkts >= sp->sfb_allocation &&
                    bin->pkts >= sp->sfb_drop_thresh)
                        ret = 1;  /* drop or mark */
        }
        return (ret);
}
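/*
 * Illustrative thresholds for the checks above: in delay-based mode
 * with sfb_fc_threshold at 12500 bytes, a bin is marked/dropped once it
 * holds >= 25000 bytes (twice the flow-control threshold); in the
 * packet-based mode a bin must reach both sfb_allocation and
 * sfb_drop_thresh (allocation + allocation/2) packets.
 */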

/*
 * early-drop probability is kept in pmark of each bin of the flow
 */
static int
sfb_drop_early(struct sfb *sp, struct pkthdr *pkt, u_int16_t *pmin,
    struct timespec *now)
{
#if SFB_LEVELS != 2
        int i;
#endif /* SFB_LEVELS != 2 */
        struct sfbbinstats *bin;
        int s, n, ret = 0;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        *pmin = (u_int16_t)-1;

        /*
         * Update current bins; optimize for SFB_LEVELS=2
         */
#if SFB_LEVELS == 2
        /* Level 0: bin index at [0] for set 0; [2] for set 1 */
        n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
        bin = SFB_BINST(sp, 0, n, s);
        if (*pmin > (u_int16_t)bin->pmark)
                *pmin = (u_int16_t)bin->pmark;

        /* Update SFB probability */
        if (bin->pkts >= sp->sfb_allocation)
                sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

        ret = sfb_bin_mark_or_drop(sp, bin);

        /* Level 1: bin index at [1] for set 0; [3] for set 1 */
        n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
        bin = SFB_BINST(sp, 1, n, s);
        if (*pmin > (u_int16_t)bin->pmark)
                *pmin = (u_int16_t)bin->pmark;

        if (bin->pkts >= sp->sfb_allocation)
                sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 */
        for (i = 0; i < SFB_LEVELS; i++) {
                if (s == 0) /* set 0, bin index [0,1] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
                else /* set 1, bin index [2,3] */
                        n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

                bin = SFB_BINST(sp, i, n, s);
                if (*pmin > (u_int16_t)bin->pmark)
                        *pmin = (u_int16_t)bin->pmark;

                if (bin->pkts >= sp->sfb_allocation)
                        sfb_increment_bin(sp, bin,
                            SFB_BINFT(sp, i, n, s), now);
                if (i == SFB_FC_LEVEL)
                        ret = sfb_bin_mark_or_drop(sp, bin);
        }
#endif /* SFB_LEVELS != 2 */

        if (sp->sfb_flags & SFBF_SUSPENDED)
                ret = 1;  /* drop or mark */

        return (ret);
}

void
sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
    struct timespec *now)
{
        struct timespec max_getqtime;

        if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
            qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
            !net_timerisset(&sp->sfb_getqtime))
                return;

        net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
            &max_getqtime);
        if (net_timercmp(now, &max_getqtime, >)) {
                /*
                 * No packets have been dequeued in an update interval's
                 * worth of time; the queue is stalled.
                 */
                SFB_SET_DELAY_HIGH(sp, q);
                sp->sfb_stats.dequeue_stall++;
        }
}

#define DTYPE_NODROP 0  /* no drop */
#define DTYPE_FORCED 1  /* a "forced" drop */
#define DTYPE_EARLY 2  /* an "unforced" (early) drop */

int
sfb_addq(struct sfb *sp, class_queue_t *q, struct mbuf *m, struct pf_mtag *t)
{
#if !PF_ECN
#pragma unused(t)
#endif /* !PF_ECN */
        struct pkthdr *pkt = &m->m_pkthdr;
        struct timespec now;
        int droptype, s;
        u_int16_t pmin;
        int fc_adv = 0;
        int ret = CLASSQEQ_SUCCESS;
        u_int32_t maxqsize = 0;

        s = sp->sfb_current;
        VERIFY((s + (s ^ 1)) == 1);

        /* See comments in <rdar://problem/14040693> */
        VERIFY(!(pkt->pkt_flags & PKTF_PRIV_GUARDED));
        pkt->pkt_flags |= PKTF_PRIV_GUARDED;

        if (pkt->pkt_enqueue_ts > 0) {
                net_nsectimer(&pkt->pkt_enqueue_ts, &now);
        } else {
                nanouptime(&now);
                net_timernsec(&now, &pkt->pkt_enqueue_ts);
        }

        /* time to swap the bins? */
        if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
                net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
                sfb_swap_bins(sp, qlen(q));
                s = sp->sfb_current;
                VERIFY((s + (s ^ 1)) == 1);
        }

        if (!net_timerisset(&sp->sfb_update_time)) {
                net_timeradd(&now, &sp->sfb_update_interval,
                    &sp->sfb_update_time);
        }

        /*
         * If getq time is not set because this is the first packet
         * or after idle time, set it now so that we can detect a stall.
         */
        if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime))
                *(&sp->sfb_getqtime) = *(&now);

        pkt->pkt_sfb_flags = 0;
        pkt->pkt_sfb_hash16[s] =
            (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
            (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
        pkt->pkt_sfb_hash16[s ^ 1] =
            (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
            (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);

        /* check if the queue has been stalled */
        sfb_detect_dequeue_stall(sp, q, &now);

        /* see if we drop early */
        droptype = DTYPE_NODROP;
        if (sfb_drop_early(sp, pkt, &pmin, &now)) {
                /* flow control, mark or drop by sfb */
                if ((sp->sfb_flags & SFBF_FLOWCTL) &&
                    (pkt->pkt_flags & PKTF_FLOW_ADV)) {
                        fc_adv = 1;
                        /* drop all during suspension or for non-TCP */
                        if ((sp->sfb_flags & SFBF_SUSPENDED) ||
                            pkt->pkt_proto != IPPROTO_TCP) {
                                droptype = DTYPE_EARLY;
                                sp->sfb_stats.drop_early++;
                        }
                }
#if PF_ECN
                else if ((sp->sfb_flags & SFBF_ECN) &&
                    (pkt->pkt_proto == IPPROTO_TCP) && /* only for TCP */
                    ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
                    mark_ecn(m, t, sp->sfb_flags) &&
                    !(sp->sfb_flags & SFBF_SUSPENDED)) {
                        /* successfully marked; do not drop. */
                        sp->sfb_stats.marked_packets++;
                }
#endif /* PF_ECN */
                else {
                        /* unforced drop by sfb */
                        droptype = DTYPE_EARLY;
                        sp->sfb_stats.drop_early++;
                }
        }

        /* non-responsive flow penalty? */
        if (droptype == DTYPE_NODROP && sfb_penalize(sp, pkt, &now)) {
                droptype = DTYPE_FORCED;
                sp->sfb_stats.drop_pbox++;
        }

        if (SFB_QUEUE_DELAYBASED(sp))
                maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE;
        else
                maxqsize = qlimit(q);

        /*
         * When the queue length hits the queue limit, make it a forced
         * drop
         */
        if (droptype == DTYPE_NODROP && qlen(q) >= maxqsize) {
                if (pkt->pkt_proto == IPPROTO_TCP &&
                    ((pkt->pkt_flags & PKTF_TCP_REXMT) ||
                    (sp->sfb_flags & SFBF_LAST_PKT_DROPPED))) {
                        /*
                         * At some level, dropping packets will make the
                         * flows backoff and will keep memory requirements
                         * under control. But we should not cause a tail
                         * drop because it can take a long time for a
                         * TCP flow to recover. We should try to drop
                         * alternate packets instead.
                         */
                        sp->sfb_flags &= ~SFBF_LAST_PKT_DROPPED;
                } else {
                        droptype = DTYPE_FORCED;
                        sp->sfb_stats.drop_queue++;
                        sp->sfb_flags |= SFBF_LAST_PKT_DROPPED;
                }
        }

        if (fc_adv == 1 && droptype != DTYPE_FORCED &&
            sfb_bin_addfcentry(sp, pkt)) {
                /* deliver flow control advisory error */
                if (droptype == DTYPE_NODROP) {
                        ret = CLASSQEQ_SUCCESS_FC;
                        VERIFY(!(sp->sfb_flags & SFBF_SUSPENDED));
                } else if (sp->sfb_flags & SFBF_SUSPENDED) {
                        /* dropped due to suspension */
                        ret = CLASSQEQ_DROPPED_SP;
                } else {
                        /* dropped due to flow-control */
                        ret = CLASSQEQ_DROPPED_FC;
                }
        }

        /* if successful enqueue this packet, else drop it */
        if (droptype == DTYPE_NODROP) {
                _addq(q, m);
        } else {
                IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
                m_freem(m);
                return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROPPED);
        }

        if (!(pkt->pkt_sfb_flags & SFB_PKT_PBOX))
                sfb_eq_update_bins(sp, pkt);
        else
                sp->sfb_stats.pbox_packets++;

        /* successfully queued */
        return (ret);
}

static struct mbuf *
sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge)
{
        struct timespec now;
        struct mbuf *m;
        struct pkthdr *pkt;

        if (!purge && (sp->sfb_flags & SFBF_SUSPENDED))
                return (NULL);

        nanouptime(&now);

        /* flow of 0 means head of queue */
        if ((m = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) {
                if (!purge)
                        net_timerclear(&sp->sfb_getqtime);
                return (NULL);
        }

        VERIFY(m->m_flags & M_PKTHDR);

        pkt = &m->m_pkthdr;
        VERIFY(pkt->pkt_flags & PKTF_PRIV_GUARDED);

        if (!purge) {
                /* calculate EWMA of dequeues */
                if (net_timerisset(&sp->sfb_getqtime)) {
                        struct timespec delta;
                        u_int64_t avg, new;

                        net_timersub(&now, &sp->sfb_getqtime, &delta);
                        net_timernsec(&delta, &new);
                        avg = sp->sfb_stats.dequeue_avg;
                        if (avg > 0) {
                                int decay = DEQUEUE_DECAY;
                                /*
                                 * If the time since last dequeue is
                                 * significantly greater than the current
                                 * average, weigh the average more against
                                 * the old value.
                                 */
                                if (DEQUEUE_SPIKE(new, avg))
                                        decay += 5;
                                avg = (((avg << decay) - avg) + new) >> decay;
                        } else {
                                avg = new;
                        }
                        sp->sfb_stats.dequeue_avg = avg;
                }
                *(&sp->sfb_getqtime) = *(&now);
        }

        if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
                u_int64_t dequeue_ns, queue_delay = 0;

                net_timernsec(&now, &dequeue_ns);
                if (dequeue_ns > pkt->pkt_enqueue_ts)
                        queue_delay = dequeue_ns - pkt->pkt_enqueue_ts;

                if (sp->sfb_min_qdelay == 0 ||
                    (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay))
                        sp->sfb_min_qdelay = queue_delay;
                if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
                        if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
                                if (!SFB_IS_DELAYHIGH(sp))
                                        SFB_SET_DELAY_HIGH(sp, q);
                        } else {
                                sp->sfb_flags &= ~(SFBF_DELAYHIGH);
                                sp->sfb_fc_threshold = 0;
                        }
                        net_timeradd(&now, &sp->sfb_update_interval,
                            &sp->sfb_update_time);
                        sp->sfb_min_qdelay = 0;
                }
        }
        pkt->pkt_enqueue_ts = 0;

        /*
         * Clearpkts are the ones which were in the queue when the hash
         * function was perturbed. Since the perturbation value (fudge),
         * and thus the bin information, for these packets is not known,
         * we do not change accounting information while dequeuing these
         * packets. It is important not to set the hash interval too
         * small for this reason. A rule of thumb is to set it to K*D,
         * where D is the time taken to drain the queue.
         */
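        /*
         * Illustrative arithmetic (assumed numbers): with K = 100 and a
         * drain time D of 100 ms, the hash interval works out to about
         * 10 seconds, which is what HINTERVAL_MIN/HINTERVAL_MAX provide
         * by default.
         */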
        if (pkt->pkt_sfb_flags & SFB_PKT_PBOX) {
                pkt->pkt_sfb_flags &= ~SFB_PKT_PBOX;
                if (sp->sfb_clearpkts > 0)
                        sp->sfb_clearpkts--;
        } else if (sp->sfb_clearpkts > 0) {
                sp->sfb_clearpkts--;
        } else {
                sfb_dq_update_bins(sp, pkt, &now, qsize(q));
        }

        /* See comments in <rdar://problem/14040693> */
        pkt->pkt_flags &= ~PKTF_PRIV_GUARDED;

        /*
         * If the queue becomes empty before the update interval, reset
         * the flow control threshold
         */
        if (qsize(q) == 0) {
                sp->sfb_flags &= ~SFBF_DELAYHIGH;
                sp->sfb_min_qdelay = 0;
                sp->sfb_fc_threshold = 0;
                net_timerclear(&sp->sfb_update_time);
                net_timerclear(&sp->sfb_getqtime);
        }

        return (m);
}

struct mbuf *
sfb_getq(struct sfb *sp, class_queue_t *q)
{
        return (sfb_getq_flow(sp, q, 0, FALSE));
}

void
sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow, u_int32_t *packets,
    u_int32_t *bytes)
{
        u_int32_t cnt = 0, len = 0;
        struct mbuf *m;

        IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);

        while ((m = sfb_getq_flow(sp, q, flow, TRUE)) != NULL) {
                cnt++;
                len += m_pktlen(m);
                m_freem(m);
        }

        if (packets != NULL)
                *packets = cnt;
        if (bytes != NULL)
                *bytes = len;
}

void
sfb_updateq(struct sfb *sp, cqev_t ev)
{
        struct ifnet *ifp = sp->sfb_ifp;

        VERIFY(ifp != NULL);

        switch (ev) {
        case CLASSQ_EV_LINK_BANDWIDTH: {
                u_int64_t eff_rate = ifnet_output_linkrate(ifp);

                /* update parameters only if rate has changed */
                if (eff_rate == sp->sfb_eff_rate)
                        break;

                if (classq_verbose) {
                        log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new "
                            "eff_rate=%llu bps\n", if_name(ifp), sp->sfb_qid,
                            eff_rate);
                }
                sfb_calc_holdtime(sp, eff_rate);
                sfb_calc_pboxtime(sp, eff_rate);
                sfb_calc_target_qdelay(sp, eff_rate);
                sfb_calc_update_interval(sp, eff_rate);
                break;
        }

        case CLASSQ_EV_LINK_UP:
        case CLASSQ_EV_LINK_DOWN:
                if (classq_verbose) {
                        log(LOG_DEBUG, "%s: SFB qid=%d, resetting due to "
                            "link %s\n", if_name(ifp), sp->sfb_qid,
                            (ev == CLASSQ_EV_LINK_UP) ? "UP" : "DOWN");
                }
                sfb_resetq(sp, ev);
                break;

        case CLASSQ_EV_LINK_LATENCY:
        case CLASSQ_EV_LINK_MTU:
        default:
                break;
        }
}

int
sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on)
{
        struct ifnet *ifp = sp->sfb_ifp;

        VERIFY(ifp != NULL);

        if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) ||
            (!on && !(sp->sfb_flags & SFBF_SUSPENDED)))
                return (0);

        if (!(sp->sfb_flags & SFBF_FLOWCTL)) {
                log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since "
                    "flow-control is not enabled\n", if_name(ifp), sp->sfb_qid,
                    (on ? "suspend" : "resume"));
                return (ENOTSUP);
        }

        if (classq_verbose) {
                log(LOG_DEBUG, "%s: SFB qid=%d, setting state to %s\n",
                    if_name(ifp), sp->sfb_qid, (on ? "SUSPENDED" : "RUNNING"));
        }

        if (on) {
                sp->sfb_flags |= SFBF_SUSPENDED;
        } else {
                sp->sfb_flags &= ~SFBF_SUSPENDED;
                sfb_swap_bins(sp, qlen(q));
        }

        return (0);
}