/*
 * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/dlil.h>
#include <net/flowadv.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#if INET6
#include <netinet/ip6.h>
#endif

#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
#include <dev/random/randomdev.h>

/*
 * Stochastic Fair Blue
 *
 * Wu-chang Feng, Dilip D. Kandlur, Debanjan Saha, Kang G. Shin
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 * Based on the NS code with the following parameters:
 *
 *	bytes:		false
 *	decrement:	0.001
 *	increment:	0.005
 *	hold-time:	10ms-50ms (randomized)
 *	algorithm:	0
 *	pbox:		1
 *	pbox-time:	50-100ms (randomized)
 *	hinterval:	11-23 (randomized)
 *
 * This implementation uses L = 2 and N = 32 for 2 sets of:
 *
 *	B[L][N]: L x N array of bins (L levels, N bins per level)
 *
 * Each set effectively creates 32^2 virtual buckets (bin combinations)
 * while using only O(32*2) states.
 *
 * Given a 32-bit hash value, we divide it such that octets [0,1,2,3] are
 * used as index for the bins across the 2 levels, where level 1 uses [0,2]
 * and level 2 uses [1,3]. The 2 values per level correspond to the indices
 * for the current and warm-up sets (section 4.4 of the SFB paper, on
 * Moving Hash Functions, explains the purposes of these 2 sets.)
 */
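
/*
 * Illustrative example of the hash-to-bin mapping (not part of the build;
 * it assumes SFB_BINS_SHIFT is 5, i.e. N = 32 bins per level as described
 * above, and the little-endian byte order of the platforms this file
 * targets). SFB_BINMASK() is defined below:
 *
 *	uint32_t h = 0x4D3C2B1A;
 *	uint8_t *h8 = (uint8_t *)&h;	// h8[0]=0x1A ... h8[3]=0x4D
 *
 *	// current set (s = 0): level 0 uses h8[0], level 1 uses h8[1]
 *	int c0 = SFB_BINMASK(h8[0]);	// 0x1A & 0x1F = 26
 *	int c1 = SFB_BINMASK(h8[1]);	// 0x2B & 0x1F = 11
 *
 *	// warm-up set (s = 1): level 0 uses h8[2], level 1 uses h8[3]
 *	int w0 = SFB_BINMASK(h8[2]);	// 0x3C & 0x1F = 28
 *	int w1 = SFB_BINMASK(h8[3]);	// 0x4D & 0x1F = 13
 */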

/*
 * Use Murmur3A_x86_32 for the hash function. It seems to perform
 * consistently across platforms for a 1-word key (the 32-bit flowhash
 * value). See flowhash.h for other alternatives. We only need 16-bit
 * hash output.
 */
#define SFB_HASH	net_flowhash_mh3_x86_32
#define SFB_HASHMASK	HASHMASK(16)

#define SFB_BINMASK(_x) \
	((_x) & HASHMASK(SFB_BINS_SHIFT))

#define SFB_BINST(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].stats[_l][_n])

#define SFB_BINFT(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n])

#define SFB_FC_LIST(_sp, _n) \
	(&(*(_sp)->sfb_fc_lists)[_n])

/*
 * The holdtime parameter determines the minimum time interval between
 * two successive updates of the marking probability. In the event the
 * uplink speed is not known, a default value is chosen and is randomized
 * to be within the following range.
 */
#define HOLDTIME_BASE	(100ULL * 1000 * 1000)	/* 100ms */
#define HOLDTIME_MIN	(10ULL * 1000 * 1000)	/* 10ms */
#define HOLDTIME_MAX	(100ULL * 1000 * 1000)	/* 100ms */

/*
 * The pboxtime parameter determines the bandwidth allocated for rogue
 * flows, i.e. the rate limiting bandwidth. In the event the uplink speed
 * is not known, a default value is chosen and is randomized to be within
 * the following range.
 */
#define PBOXTIME_BASE	(300ULL * 1000 * 1000)	/* 300ms */
#define PBOXTIME_MIN	(30ULL * 1000 * 1000)	/* 30ms */
#define PBOXTIME_MAX	(300ULL * 1000 * 1000)	/* 300ms */

/*
 * Target queueing delay is the amount of extra delay that can be added
 * to accommodate variations in the link bandwidth. The queue should be
 * large enough to induce this much delay and nothing more than that.
 */
#define TARGET_QDELAY_BASE	(10ULL * 1000 * 1000)		/* 10ms */
#define TARGET_QDELAY_MIN	(10ULL * 1000)			/* 10us */
#define TARGET_QDELAY_MAX	(20ULL * 1000 * 1000 * 1000)	/* 20s */

/*
 * Update interval for checking the extra delay added by the queue. This
 * should be the 90th-95th percentile of the RTT experienced by any TCP
 * connection, so that it accommodates bursty traffic.
 */
#define UPDATE_INTERVAL_BASE	(100ULL * 1000 * 1000)		/* 100ms */
#define UPDATE_INTERVAL_MIN	(100ULL * 1000 * 1000)		/* 100ms */
#define UPDATE_INTERVAL_MAX	(10ULL * 1000 * 1000 * 1000)	/* 10s */

#define SFB_RANDOM(sp, tmin, tmax)	((sfb_random(sp) % (tmax)) + (tmin))

#define SFB_PKT_PBOX	0x1	/* in penalty box */

/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define SFB_MAX_PMARK	(1 << SFB_FP_SHIFT)	/* Q14 representation of 1.00 */

/*
 * These are d1 (increment) and d2 (decrement) parameters, used to determine
 * the amount by which the marking probability is incremented when the queue
 * overflows, or is decremented when the link is idle. d1 is set higher than
 * d2, because link underutilization can occur when congestion management is
 * either too conservative or too aggressive, but packet loss occurs only
 * when congestion management is too conservative. By weighing heavily
 * against packet loss, it can quickly react to a substantial increase in
 * traffic load.
 */
#define SFB_INCREMENT	82	/* Q14 representation of 0.005 */
#define SFB_DECREMENT	16	/* Q14 representation of 0.001 */

#define SFB_PMARK_TH	16056	/* Q14 representation of 0.98 */
#define SFB_PMARK_WARM	3276	/* Q14 representation of 0.2 */
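
/*
 * Worked example of the Q14 fixed-point format used above (illustrative):
 * a probability p is stored as the integer p * 2^14 = p * 16384, so
 *
 *	0.005 * 16384 = 81.92    -> SFB_INCREMENT  82
 *	0.001 * 16384 = 16.384   -> SFB_DECREMENT  16
 *	0.98  * 16384 = 16056.32 -> SFB_PMARK_TH   16056
 *	0.2   * 16384 = 3276.8   -> SFB_PMARK_WARM 3276
 *
 * SFB_PMARK_INC()/SFB_PMARK_DEC() below step a bin's pmark by these
 * amounts and clamp it to the representable range [0, SFB_MAX_PMARK].
 */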

#define SFB_PMARK_INC(_bin) do {					\
	(_bin)->pmark += sfb_increment;					\
	if ((_bin)->pmark > SFB_MAX_PMARK)				\
		(_bin)->pmark = SFB_MAX_PMARK;				\
} while (0)

#define SFB_PMARK_DEC(_bin) do {					\
	if ((_bin)->pmark > 0) {					\
		(_bin)->pmark -= sfb_decrement;				\
		if ((_bin)->pmark < 0)					\
			(_bin)->pmark = 0;				\
	}								\
} while (0)

/* Minimum number of bytes in queue to get flow controlled */
#define SFB_MIN_FC_THRESHOLD_BYTES	7500

#define SFB_SET_DELAY_HIGH(_sp_, _q_) do {				\
	(_sp_)->sfb_flags |= SFBF_DELAYHIGH;				\
	(_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES,	\
	    (qsize((_q_)) >> 3));					\
} while (0)

#define SFB_QUEUE_DELAYBASED(_sp_)	((_sp_)->sfb_flags & SFBF_DELAYBASED)
#define SFB_IS_DELAYHIGH(_sp_)		((_sp_)->sfb_flags & SFBF_DELAYHIGH)
#define SFB_QUEUE_DELAYBASED_MAXSIZE	2048	/* max pkts */

#define HINTERVAL_MIN	(10)	/* 10 seconds */
#define HINTERVAL_MAX	(20)	/* 20 seconds */
#define SFB_HINTERVAL(sp) ((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN)

#define DEQUEUE_DECAY	7	/* ilog2 of EWMA decay rate (128) */
#define DEQUEUE_SPIKE(_new, _old)	\
	((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11))

#define ABS(v)	(((v) > 0) ? (v) : -(v))
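
/*
 * Illustrative sketch of the dequeue-interval EWMA maintained by
 * sfb_getq_flow() (not part of the build). With DEQUEUE_DECAY = 7,
 *
 *	avg = ((avg << 7) - avg + new) >> 7
 *
 * is avg = (127*avg + new) / 128; e.g. avg = 1000 ns, new = 2000 ns
 * gives (128000 - 1000 + 2000) >> 7 = 129000 / 128 = 1007 ns.
 * DEQUEUE_SPIKE() flags a sample whose distance from the average exceeds
 * 2048x the average; such spikes are absorbed with a slower decay
 * (decay + 5, i.e. 1/4096 weight on the new sample).
 */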

#define SFB_ZONE_MAX		32	/* maximum elements in zone */
#define SFB_ZONE_NAME		"classq_sfb"	/* zone name */

#define SFB_BINS_ZONE_MAX	32	/* maximum elements in zone */
#define SFB_BINS_ZONE_NAME	"classq_sfb_bins"	/* zone name */

#define SFB_FCL_ZONE_MAX	32	/* maximum elements in zone */
#define SFB_FCL_ZONE_NAME	"classq_sfb_fcl"	/* zone name */

/* Place the flow control entries in current bin on level 0 */
#define SFB_FC_LEVEL	0

static unsigned int sfb_size;		/* size of zone element */
static struct zone *sfb_zone;		/* zone for sfb */

static unsigned int sfb_bins_size;	/* size of zone element */
static struct zone *sfb_bins_zone;	/* zone for sfb_bins */

static unsigned int sfb_fcl_size;	/* size of zone element */
static struct zone *sfb_fcl_zone;	/* zone for sfb_fc_lists */

/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static void *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t, boolean_t,
    pktsched_pkt_t *);
static void sfb_resetq(struct sfb *, cqev_t);
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
static inline int sfb_pcheck(struct sfb *, uint32_t);
static int sfb_penalize(struct sfb *, uint32_t, uint32_t *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static inline void sfb_dq_update_bins(struct sfb *, uint32_t, uint32_t,
    struct timespec *, u_int32_t qsize);
static inline void sfb_eq_update_bins(struct sfb *, uint32_t, uint32_t);
static int sfb_drop_early(struct sfb *, uint32_t, u_int16_t *,
    struct timespec *);
static boolean_t sfb_bin_addfcentry(struct sfb *, pktsched_pkt_t *,
    uint32_t, uint8_t, uint32_t);
static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
    struct timespec *);

SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "SFB");

static u_int64_t sfb_holdtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_holdtime, "SFB freeze time in nanoseconds");

static u_int64_t sfb_pboxtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_pboxtime, "SFB penalty box time in nanoseconds");

static u_int64_t sfb_hinterval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_hinterval, "SFB hash interval in nanoseconds");

static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_increment, SFB_INCREMENT, "SFB increment [d1]");

static u_int32_t sfb_decrement = SFB_DECREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]");

static u_int32_t sfb_allocation = 0;	/* 0 means "automatic" */
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_allocation, 0, "SFB bin allocation");

static u_int32_t sfb_ratelimit = 0;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_ratelimit, 0, "SFB rate limit");

#define KBPS	(1ULL * 1000)		/* 1 Kbits per second */
#define MBPS	(1ULL * 1000 * 1000)	/* 1 Mbits per second */
#define GBPS	(MBPS * 1000)		/* 1 Gbits per second */

struct sfb_time_tbl {
	u_int64_t speed;	/* uplink speed */
	u_int64_t holdtime;	/* hold time */
	u_int64_t pboxtime;	/* penalty box time */
};

static struct sfb_time_tbl sfb_ttbl[] = {
	{   1 * MBPS,	HOLDTIME_BASE * 1000,	PBOXTIME_BASE * 1000	},
	{  10 * MBPS,	HOLDTIME_BASE * 100,	PBOXTIME_BASE * 100	},
	{ 100 * MBPS,	HOLDTIME_BASE * 10,	PBOXTIME_BASE * 10	},
	{   1 * GBPS,	HOLDTIME_BASE,		PBOXTIME_BASE		},
	{  10 * GBPS,	HOLDTIME_BASE / 10,	PBOXTIME_BASE / 10	},
	{ 100 * GBPS,	HOLDTIME_BASE / 100,	PBOXTIME_BASE / 100	},
	{ 0, 0, 0 }
};
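
/*
 * Example of the lookup that sfb_calc_holdtime() and sfb_calc_pboxtime()
 * perform on this table (illustrative): the scan keeps the row with the
 * largest speed that does not exceed the measured uplink rate. For a
 * 100 Mbps link the loop breaks at the 1 Gbps row, having last recorded
 * the 100 Mbps row, so holdtime = HOLDTIME_BASE * 10 (1 sec) and
 * pboxtime = PBOXTIME_BASE * 10 (3 sec); slower links freeze probability
 * updates and penalize rogue flows for proportionally longer.
 */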

void
sfb_init(void)
{
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof(struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof(*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof(*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}

static u_int32_t
sfb_random(struct sfb *sp)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	return RandomULong();
}

static void
sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t holdtime;

	if (sfb_holdtime != 0) {
		holdtime = sfb_holdtime;
	} else if (outbw == 0) {
		holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
	} else {
		unsigned int n, i;

		n = sfb_ttbl[0].holdtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed) {
				break;
			}
			n = sfb_ttbl[i].holdtime;
		}
		holdtime = n;
	}
	net_nsectimer(&holdtime, &sp->sfb_holdtime);
}

static void
sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t pboxtime;

	if (sfb_pboxtime != 0) {
		pboxtime = sfb_pboxtime;
	} else if (outbw == 0) {
		pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
	} else {
		unsigned int n, i;

		n = sfb_ttbl[0].pboxtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed) {
				break;
			}
			n = sfb_ttbl[i].pboxtime;
		}
		pboxtime = n;
	}
	net_nsectimer(&pboxtime, &sp->sfb_pboxtime);
	net_timerclear(&sp->sfb_pboxfreeze);
}

static void
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
{
	u_int64_t hinterval = 0;
	struct timespec now;

	if (t != NULL) {
		/*
		 * TODO adi@apple.com: use dq_avg to derive hinterval.
		 */
		hinterval = *t;
	}

	if (sfb_hinterval != 0) {
		hinterval = sfb_hinterval;
	} else if (t == NULL || hinterval == 0) {
		hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);
	}

	net_nsectimer(&hinterval, &sp->sfb_hinterval);

	nanouptime(&now);
	net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}

static void
sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
	u_int64_t update_interval = 0;
	ifclassq_calc_update_interval(&update_interval);
	net_nsectimer(&update_interval, &sp->sfb_update_interval);
}

/*
 * sfb support routines
 */
struct sfb *
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
	struct sfb *sp;
	int i;

	VERIFY(ifp != NULL && qlim > 0);

	sp = zalloc(sfb_zone);
	if (sp == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
		return NULL;
	}
	bzero(sp, sfb_size);

	if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
		sfb_destroy(sp);
		return NULL;
	}
	bzero(sp->sfb_bins, sfb_bins_size);

	if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
		    if_name(ifp));
		sfb_destroy(sp);
		return NULL;
	}
	bzero(sp->sfb_fc_lists, sfb_fcl_size);

	for (i = 0; i < SFB_BINS; ++i) {
		STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);
	}

	sp->sfb_ifp = ifp;
	sp->sfb_qlim = qlim;
	sp->sfb_qid = qid;
	sp->sfb_flags = (flags & SFBF_USERFLAGS);
#if !PF_ECN
	if (sp->sfb_flags & SFBF_ECN) {
		sp->sfb_flags &= ~SFBF_ECN;
		log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
		    "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);
	}
#endif /* !PF_ECN */

	sfb_resetq(sp, CLASSQ_EV_INIT);

	return sp;
}
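
/*
 * Typical lifecycle of an SFB instance (an illustrative sketch, not part
 * of the build; it assumes the caller holds the appropriate ifclassq
 * lock, as the scheduler code that actually calls these routines does):
 *
 *	struct sfb *sp = sfb_alloc(ifp, qid, qlim, SFBF_FLOWCTL);
 *	if (sp != NULL) {
 *		(void) sfb_addq(sp, q, pkt, t);	// enqueue: early drop/mark
 *		sfb_getq(sp, q, pkt);		// dequeue: update bins/EWMA
 *		sfb_destroy(sp);		// drain fc lists, free zones
 *	}
 */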

static void
sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
	sp->sfb_stats.flow_feedback += fcl->cnt;
	fcl->cnt = 0;

	flowadv_add(&fcl->fclist);
	VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
}

static void
sfb_fclists_clean(struct sfb *sp)
{
	int i;

	/* Move all the flow control entries to the flowadv list */
	for (i = 0; i < SFB_BINS; ++i) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
		if (!STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
		}
	}
}

void
sfb_destroy(struct sfb *sp)
{
	sfb_fclists_clean(sp);
	if (sp->sfb_bins != NULL) {
		zfree(sfb_bins_zone, sp->sfb_bins);
		sp->sfb_bins = NULL;
	}
	if (sp->sfb_fc_lists != NULL) {
		zfree(sfb_fcl_zone, sp->sfb_fc_lists);
		sp->sfb_fc_lists = NULL;
	}
	zfree(sfb_zone, sp);
}

static void
sfb_resetq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;
	u_int64_t eff_rate;

	VERIFY(ifp != NULL);

	if (ev != CLASSQ_EV_LINK_DOWN) {
		(*sp->sfb_bins)[0].fudge = sfb_random(sp);
		(*sp->sfb_bins)[1].fudge = sfb_random(sp);
		sp->sfb_allocation = ((sfb_allocation == 0) ?
		    (sp->sfb_qlim / 3) : sfb_allocation);
		sp->sfb_drop_thresh = sp->sfb_allocation +
		    (sp->sfb_allocation >> 1);
	}

	sp->sfb_clearpkts = 0;
	sp->sfb_current = 0;

	eff_rate = ifnet_output_linkrate(ifp);
	sp->sfb_eff_rate = eff_rate;

	sfb_calc_holdtime(sp, eff_rate);
	sfb_calc_pboxtime(sp, eff_rate);
	sfb_calc_hinterval(sp, NULL);
	ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
	sfb_calc_update_interval(sp, eff_rate);

	if (ev == CLASSQ_EV_LINK_DOWN ||
	    ev == CLASSQ_EV_LINK_UP) {
		sfb_fclists_clean(sp);
	}

	bzero(sp->sfb_bins, sizeof(*sp->sfb_bins));
	bzero(&sp->sfb_stats, sizeof(sp->sfb_stats));

	if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose) {
		return;
	}

	log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
	    "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
	    "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps, "
	    "target_qdelay=%llu nsec, "
	    "update_interval=%llu sec %llu nsec, flags=0x%x\n",
	    if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
	    (u_int64_t)sp->sfb_pboxtime.tv_nsec,
	    (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
	    (int)sp->sfb_hinterval.tv_sec, (int)sizeof(*sp->sfb_bins),
	    eff_rate, (u_int64_t)sp->sfb_target_qdelay,
	    (u_int64_t)sp->sfb_update_interval.tv_sec,
	    (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
}

void
sfb_getstats(struct sfb *sp, struct sfb_stats *sps)
{
	sps->allocation = sp->sfb_allocation;
	sps->dropthresh = sp->sfb_drop_thresh;
	sps->clearpkts = sp->sfb_clearpkts;
	sps->current = sp->sfb_current;
	sps->target_qdelay = sp->sfb_target_qdelay;
	sps->min_estdelay = sp->sfb_min_qdelay;
	sps->delay_fcthreshold = sp->sfb_fc_threshold;
	sps->flags = sp->sfb_flags;

	net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
	net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
	net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
	net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
	*(&(sps->sfbstats)) = *(&(sp->sfb_stats));

	_CASSERT(sizeof((*sp->sfb_bins)[0].stats) ==
	    sizeof(sps->binstats[0].stats));

	bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats,
	    sizeof(sps->binstats[0].stats));
	bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats,
	    sizeof(sps->binstats[1].stats));
}

static void
sfb_swap_bins(struct sfb *sp, u_int32_t len)
{
	int i, j, s;

	if (sp->sfb_flags & SFBF_SUSPENDED) {
		return;
	}

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	(*sp->sfb_bins)[s].fudge = sfb_random(sp); /* recompute perturbation */
	sp->sfb_clearpkts = len;
	sp->sfb_stats.num_rehash++;

	s = (sp->sfb_current ^= 1);	/* flip the bit (swap current) */

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, "
		    "qlen=%d\n", if_name(sp->sfb_ifp), sp->sfb_qid, s, len);
	}

	/* clear freezetime for all current bins */
	bzero(&(*sp->sfb_bins)[s].freezetime,
	    sizeof((*sp->sfb_bins)[s].freezetime));

	/* clear/adjust bin statistics and flow control lists */
	for (i = 0; i < SFB_BINS; i++) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);

		if (!STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
		}

		for (j = 0; j < SFB_LEVELS; j++) {
			struct sfbbinstats *cbin, *wbin;

			cbin = SFB_BINST(sp, j, i, s);		/* current */
			wbin = SFB_BINST(sp, j, i, s ^ 1);	/* warm-up */

			cbin->pkts = 0;
			cbin->bytes = 0;
			if (cbin->pmark > SFB_MAX_PMARK) {
				cbin->pmark = SFB_MAX_PMARK;
			}
			if (cbin->pmark < 0) {
				cbin->pmark = 0;
			}

			/*
			 * Keep pmark from before to identify
			 * non-responsive flows immediately.
			 */
			if (wbin->pmark > SFB_PMARK_WARM) {
				wbin->pmark = SFB_PMARK_WARM;
			}
		}
	}
}

static inline int
sfb_pcheck(struct sfb *sp, uint32_t pkt_sfb_hash)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;
	int s;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * For current bins, returns 1 if all pmark >= SFB_PMARK_TH,
	 * 0 otherwise; optimize for SFB_LEVELS=2.
	 */
#if SFB_LEVELS == 2
	/*
	 * Level 0: bin index at [0] for set 0; [2] for set 1
	 * Level 1: bin index at [1] for set 0; [3] for set 1
	 */
	if (SFB_BINST(sp, 0, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]),
	    s)->pmark < SFB_PMARK_TH ||
	    SFB_BINST(sp, 1, SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]),
	    s)->pmark < SFB_PMARK_TH) {
		return 0;
	}
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {		/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH) {
			return 0;
		}
	}
#endif /* SFB_LEVELS != 2 */
	return 1;
}

static int
sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags,
    struct timespec *now)
{
	struct timespec delta = { 0, 0 };
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	/* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
	if (!sfb_ratelimit || !sfb_pcheck(sp, pkt_sfb_hash)) {
		return 0;
	}

	net_timersub(now, &sp->sfb_pboxfreeze, &delta);
	if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) {
#if SFB_LEVELS != 2
		int i;
#endif /* SFB_LEVELS != 2 */
		struct sfbbinstats *bin;
		int n, w;

		w = sp->sfb_current ^ 1;
		VERIFY((w + (w ^ 1)) == 1);

		/*
		 * Update warm-up bins; optimize for SFB_LEVELS=2
		 */
#if SFB_LEVELS == 2
		/* Level 0: bin index at [0] for set 0; [2] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1)]);
		bin = SFB_BINST(sp, 0, n, w);
		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);
		}

		/* Level 1: bin index at [1] for set 0; [3] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1) + 1]);
		bin = SFB_BINST(sp, 1, n, w);
		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
		}
#else /* SFB_LEVELS != 2 */
		for (i = 0; i < SFB_LEVELS; i++) {
			if (w == 0) {		/* set 0, bin index [0,1] */
				n = SFB_BINMASK(pkt_sfb_hash8[i]);
			} else {		/* set 1, bin index [2,3] */
				n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
			}

			bin = SFB_BINST(sp, i, n, w);
			if (bin->pkts >= sp->sfb_allocation) {
				sfb_increment_bin(sp, bin,
				    SFB_BINFT(sp, i, n, w), now);
			}
		}
#endif /* SFB_LEVELS != 2 */
		return 1;
	}

	/* non-conformant or else misclassified flow; queue it anyway */
	*pkt_sfb_flags |= SFB_PKT_PBOX;
	*(&sp->sfb_pboxfreeze) = *now;

	return 0;
}

static void
sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now, boolean_t inc)
{
	struct timespec delta;

	net_timersub(now, ft, &delta);
	if (net_timercmp(&delta, &sp->sfb_holdtime, <)) {
		if (classq_verbose > 1) {
			log(LOG_DEBUG, "%s: SFB qid=%d, %s update frozen "
			    "(delta=%llu nsec)\n", if_name(sp->sfb_ifp),
			    sp->sfb_qid, inc ? "increment" : "decrement",
			    (u_int64_t)delta.tv_nsec);
		}
		return;
	}

	/* increment/decrement marking probability */
	*ft = *now;
	if (inc) {
		SFB_PMARK_INC(bin);
	} else {
		SFB_PMARK_DEC(bin);
	}
}

static void
sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	return sfb_adjust_bin(sp, bin, ft, now, FALSE);
}

static void
sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	return sfb_adjust_bin(sp, bin, ft, now, TRUE);
}

static inline void
sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len,
    struct timespec *now, u_int32_t qsize)
{
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
	int i;
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	struct sfbbinstats *bin;
	int s, n;
	struct sfb_fcl *fcl = NULL;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2 and SFB_FC_LEVEL=0
	 */
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
	bin->pkts--;
	bin->bytes -= pkt_len;

	if (bin->pkts == 0) {
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
	}

	/* Deliver flow control feedback to the sockets */
	if (SFB_QUEUE_DELAYBASED(sp)) {
		if (!(SFB_IS_DELAYHIGH(sp)) ||
		    bin->bytes <= sp->sfb_fc_threshold ||
		    bin->pkts == 0 || qsize == 0) {
			fcl = SFB_FC_LIST(sp, n);
		}
	} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
		fcl = SFB_FC_LIST(sp, n);
	}

	if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) {
		sfb_fclist_append(sp, fcl);
	}
	fcl = NULL;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt_len);
	bin->pkts--;
	bin->bytes -= pkt_len;
	if (bin->pkts == 0) {
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
	}
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {		/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);

		VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
		bin->pkts--;
		bin->bytes -= pkt_len;
		if (bin->pkts == 0) {
			sfb_decrement_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		}
		if (i != SFB_FC_LEVEL) {
			continue;
		}
		if (SFB_QUEUE_DELAYBASED(sp)) {
			if (!(SFB_IS_DELAYHIGH(sp)) ||
			    bin->bytes <= sp->sfb_fc_threshold) {
				fcl = SFB_FC_LIST(sp, n);
			}
		} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
			fcl = SFB_FC_LIST(sp, n);
		}
		if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
		}
		fcl = NULL;
	}
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
}

static inline void
sfb_eq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	int s;
	struct sfbbinstats *bin;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	bin = SFB_BINST(sp, 0,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1)]), s);
	bin->pkts++;
	bin->bytes += pkt_len;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	bin = SFB_BINST(sp, 1,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]), s);
	bin->pkts++;
	bin->bytes += pkt_len;

#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {		/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);
		bin->pkts++;
		bin->bytes += pkt_len;
	}
#endif /* SFB_LEVELS != 2 */
}

static boolean_t
sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash,
    uint8_t flowsrc, uint32_t flowid)
{
	struct flowadv_fcentry *fce;
	struct sfb_fcl *fcl;
	int s;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	if (flowid == 0) {
		sp->sfb_stats.null_flowid++;
		return FALSE;
	}

	/*
	 * Use value at index 0 for set 0 and
	 * value at index 2 for set 1
	 */
	fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]));
	STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
		if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
		    fce->fce_flowid == flowid) {
			/* Already on flow control list; just return */
			return TRUE;
		}
	}

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	fce = pktsched_alloc_fcentry(pkt, sp->sfb_ifp, M_WAITOK);
	if (fce != NULL) {
		STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
		fcl->cnt++;
		sp->sfb_stats.flow_controlled++;
	}

	return fce != NULL;
}

/*
 * Check if this flow needs to be flow-controlled or if this
 * packet needs to be dropped.
 */
static int
sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
{
	int ret = 0;
	if (SFB_QUEUE_DELAYBASED(sp)) {
		/*
		 * Mark or drop if this bin has more
		 * bytes than the flowcontrol threshold.
		 */
		if (SFB_IS_DELAYHIGH(sp) &&
		    bin->bytes >= (sp->sfb_fc_threshold << 1)) {
			ret = 1;
		}
	} else {
		if (bin->pkts >= sp->sfb_allocation &&
		    bin->pkts >= sp->sfb_drop_thresh) {
			ret = 1;	/* drop or mark */
		}
	}
	return ret;
}
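
/*
 * Example of the two regimes above (illustrative): in delay-based mode,
 * with the delay flagged high and sfb_fc_threshold at its floor of
 * SFB_MIN_FC_THRESHOLD_BYTES (7500), a bin is marked/dropped once it
 * holds 15000 bytes or more. In the allocation-based mode, with the
 * default allocation of qlim / 3 set by sfb_resetq(), drop_thresh is
 * 1.5x the allocation; for qlim = 128 that is allocation = 42 and
 * drop_thresh = 63 packets.
 */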

/*
 * Early-drop probability is kept in pmark of each bin of the flow
 */
static int
sfb_drop_early(struct sfb *sp, uint32_t pkt_sfb_hash, u_int16_t *pmin,
    struct timespec *now)
{
#if SFB_LEVELS != 2
	int i;
#endif /* SFB_LEVELS != 2 */
	struct sfbbinstats *bin;
	int s, n, ret = 0;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	*pmin = (u_int16_t)-1;

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);
	if (*pmin > (u_int16_t)bin->pmark) {
		*pmin = (u_int16_t)bin->pmark;
	}

	/* Update SFB probability */
	if (bin->pkts >= sp->sfb_allocation) {
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
	}

	ret = sfb_bin_mark_or_drop(sp, bin);

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);
	if (*pmin > (u_int16_t)bin->pmark) {
		*pmin = (u_int16_t)bin->pmark;
	}

	if (bin->pkts >= sp->sfb_allocation) {
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
	}
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {		/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);
		if (*pmin > (u_int16_t)bin->pmark) {
			*pmin = (u_int16_t)bin->pmark;
		}

		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		}
		if (i == SFB_FC_LEVEL) {
			ret = sfb_bin_mark_or_drop(sp, bin);
		}
	}
#endif /* SFB_LEVELS != 2 */

	if (sp->sfb_flags & SFBF_SUSPENDED) {
		ret = 1;	/* drop or mark */
	}
	return ret;
}
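
/*
 * Worked example (illustrative): suppose a packet hashes to a level-0
 * bin with pmark = 8192 (~0.5 in Q14) and a level-1 bin with
 * pmark = 12288 (~0.75). Then *pmin is 8192: the flow is judged by its
 * *least* congested bin, so only flows whose every bin is saturated,
 * which is characteristic of non-responsive flows, see a high marking
 * probability; sfb_addq() compares pmin against a random Q14 value on
 * the ECN path.
 */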

void
sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
    struct timespec *now)
{
	struct timespec max_getqtime;

	if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
	    qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
	    !net_timerisset(&sp->sfb_getqtime)) {
		return;
	}

	net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
	    &max_getqtime);
	if (net_timercmp(now, &max_getqtime, >)) {
		/*
		 * No packets have been dequeued in an update interval
		 * worth of time; it means that the queue is stalled.
		 */
		SFB_SET_DELAY_HIGH(sp, q);
		sp->sfb_stats.dequeue_stall++;
	}
}

#define DTYPE_NODROP	0	/* no drop */
#define DTYPE_FORCED	1	/* a "forced" drop */
#define DTYPE_EARLY	2	/* an "unforced" (early) drop */

int
sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt,
    struct pf_mtag *t)
{
#if !PF_ECN
#pragma unused(t)
#endif /* !PF_ECN */
	struct timespec now;
	int droptype, s;
	uint16_t pmin;
	int fc_adv = 0;
	int ret = CLASSQEQ_SUCCESS;
	uint32_t maxqsize = 0;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_sfb_hash;
	uint16_t *pkt_sfb_hash16;
	uint32_t *pkt_sfb_flags;
	uint32_t pkt_flowid;
	uint32_t *pkt_flags;
	uint8_t pkt_proto, pkt_flowsrc;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);
	pkt_sfb_hash16 = (uint16_t *)pkt_sfb_hash;

	if (pkt->pktsched_ptype == QP_MBUF) {
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
	}

	if (*pkt_timestamp > 0) {
		net_nsectimer(pkt_timestamp, &now);
	} else {
		nanouptime(&now);
		net_timernsec(&now, pkt_timestamp);
	}

	/* time to swap the bins? */
	if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
		net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
		sfb_swap_bins(sp, qlen(q));
		s = sp->sfb_current;
		VERIFY((s + (s ^ 1)) == 1);
	}

	if (!net_timerisset(&sp->sfb_update_time)) {
		net_timeradd(&now, &sp->sfb_update_interval,
		    &sp->sfb_update_time);
	}

	/*
	 * If getq time is not set because this is the first packet
	 * or after idle time, set it now so that we can detect a stall.
	 */
	if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime)) {
		*(&sp->sfb_getqtime) = *(&now);
	}

	*pkt_sfb_flags = 0;
	pkt_sfb_hash16[s] =
	    (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid),
	    (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
	pkt_sfb_hash16[s ^ 1] =
	    (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid),
	    (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);

	/* check if the queue has been stalled */
	sfb_detect_dequeue_stall(sp, q, &now);

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (sfb_drop_early(sp, *pkt_sfb_hash, &pmin, &now)) {
		/* flow control, mark or drop by sfb */
		if ((sp->sfb_flags & SFBF_FLOWCTL) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/* drop all during suspension or for non-TCP */
			if ((sp->sfb_flags & SFBF_SUSPENDED) ||
			    pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				sp->sfb_stats.drop_early++;
			}
		}
#if PF_ECN
		/* XXX: only supported for mbuf */
		else if ((sp->sfb_flags & SFBF_ECN) &&
		    (pkt->pktsched_ptype == QP_MBUF) &&
		    (pkt_proto == IPPROTO_TCP) &&	/* only for TCP */
		    ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
		    mark_ecn(m, t, sp->sfb_flags) &&
		    !(sp->sfb_flags & SFBF_SUSPENDED)) {
			/* successfully marked; do not drop. */
			sp->sfb_stats.marked_packets++;
		}
#endif /* PF_ECN */
		else {
			/* unforced drop by sfb */
			droptype = DTYPE_EARLY;
			sp->sfb_stats.drop_early++;
		}
	}

	/* non-responsive flow penalty? */
	if (droptype == DTYPE_NODROP && sfb_penalize(sp, *pkt_sfb_hash,
	    pkt_sfb_flags, &now)) {
		droptype = DTYPE_FORCED;
		sp->sfb_stats.drop_pbox++;
	}

	if (SFB_QUEUE_DELAYBASED(sp)) {
		maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE;
	} else {
		maxqsize = qlimit(q);
	}

	/*
	 * When the queue length hits the queue limit, make it a forced
	 * drop
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= maxqsize) {
		if (pkt_proto == IPPROTO_TCP &&
		    qlen(q) < (maxqsize + (maxqsize >> 1)) &&
		    ((*pkt_flags & PKTF_TCP_REXMT) ||
		    (sp->sfb_flags & SFBF_LAST_PKT_DROPPED))) {
			/*
			 * At some level, dropping packets will make the
			 * flows backoff and will keep memory requirements
			 * under control. But we should not cause a tail
			 * drop because it can take a long time for a
			 * TCP flow to recover. We should try to drop
			 * alternate packets instead.
			 */
			sp->sfb_flags &= ~SFBF_LAST_PKT_DROPPED;
		} else {
			droptype = DTYPE_FORCED;
			sp->sfb_stats.drop_queue++;
			sp->sfb_flags |= SFBF_LAST_PKT_DROPPED;
		}
	}

	if (fc_adv == 1 && droptype != DTYPE_FORCED &&
	    sfb_bin_addfcentry(sp, pkt, *pkt_sfb_hash, pkt_flowsrc,
	    pkt_flowid)) {
		/* deliver flow control advisory error */
		if (droptype == DTYPE_NODROP) {
			ret = CLASSQEQ_SUCCESS_FC;
			VERIFY(!(sp->sfb_flags & SFBF_SUSPENDED));
		} else if (sp->sfb_flags & SFBF_SUSPENDED) {
			/* drop due to suspension */
			ret = CLASSQEQ_DROP_SP;
		} else {
			/* drop due to flow-control */
			ret = CLASSQEQ_DROP_FC;
		}
	}
	/* if successful, enqueue this packet; else drop it */
	if (droptype == DTYPE_NODROP) {
		VERIFY(pkt->pktsched_ptype == qptype(q));
		_addq(q, pkt->pktsched_pkt);
	} else {
		IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	if (!(*pkt_sfb_flags & SFB_PKT_PBOX)) {
		sfb_eq_update_bins(sp, *pkt_sfb_hash,
		    pktsched_get_pkt_len(pkt));
	} else {
		sp->sfb_stats.pbox_packets++;
	}

	/* successfully queued */
	return ret;
}
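
/*
 * Summary of the enqueue decision order in sfb_addq() above (descriptive):
 * (1) swap the bin sets if the rehash interval has expired; (2) consult
 * sfb_drop_early() for an unforced drop, a flow advisory, or an ECN mark;
 * (3) consult sfb_penalize() to rate-limit non-responsive flows via the
 * penalty box; (4) enforce the queue limit, sparing TCP retransmissions
 * and alternating drops rather than tail-dropping a burst; (5) record
 * flow-controlled flows so they can be advised once their bin drains;
 * finally, charge the packet to its bins unless it is in the penalty box.
 */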

static void *
sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge,
    pktsched_pkt_t *pkt)
{
	struct timespec now;
	classq_pkt_type_t ptype;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_flags;
	uint32_t *pkt_sfb_flags;
	uint32_t *pkt_sfb_hash;
	void *p;

	if (!purge && (sp->sfb_flags & SFBF_SUSPENDED)) {
		return NULL;
	}

	nanouptime(&now);

	/* flow of 0 means head of queue */
	if ((p = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) {
		if (!purge) {
			net_timerclear(&sp->sfb_getqtime);
		}
		return NULL;
	}

	ptype = qptype(q);
	pktsched_pkt_encap(pkt, ptype, p);
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL,
	    NULL, NULL, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF) {
		VERIFY(*pkt_flags & PKTF_PRIV_GUARDED);
	}

	if (!purge) {
		/* calculate EWMA of dequeues */
		if (net_timerisset(&sp->sfb_getqtime)) {
			struct timespec delta;
			u_int64_t avg, new;
			net_timersub(&now, &sp->sfb_getqtime, &delta);
			net_timernsec(&delta, &new);
			avg = sp->sfb_stats.dequeue_avg;
			if (avg > 0) {
				int decay = DEQUEUE_DECAY;
				/*
				 * If the time since the last dequeue is
				 * significantly greater than the current
				 * average, weigh the average more against
				 * the old value.
				 */
				if (DEQUEUE_SPIKE(new, avg)) {
					decay += 5;
				}
				avg = (((avg << decay) - avg) + new) >> decay;
			} else {
				avg = new;
			}
			sp->sfb_stats.dequeue_avg = avg;
		}
		*(&sp->sfb_getqtime) = *(&now);
	}

	if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
		u_int64_t dequeue_ns, queue_delay = 0;
		net_timernsec(&now, &dequeue_ns);
		if (dequeue_ns > *pkt_timestamp) {
			queue_delay = dequeue_ns - *pkt_timestamp;
		}

		if (sp->sfb_min_qdelay == 0 ||
		    (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay)) {
			sp->sfb_min_qdelay = queue_delay;
		}
		if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
			if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
				if (!SFB_IS_DELAYHIGH(sp)) {
					SFB_SET_DELAY_HIGH(sp, q);
				}
			} else {
				sp->sfb_flags &= ~(SFBF_DELAYHIGH);
				sp->sfb_fc_threshold = 0;
			}
			net_timeradd(&now, &sp->sfb_update_interval,
			    &sp->sfb_update_time);
			sp->sfb_min_qdelay = 0;
		}
	}
	*pkt_timestamp = 0;

	/*
	 * Clearpkts are the ones which were in the queue when the hash
	 * function was perturbed. Since the perturbation value (fudge),
	 * and thus bin information for these packets is not known, we do
	 * not change accounting information while dequeuing these packets.
	 * For this reason it is important not to set the hash interval too
	 * small. A rule of thumb is to set it to K*D, where D is the time
	 * taken to drain the queue.
	 */
	if (*pkt_sfb_flags & SFB_PKT_PBOX) {
		*pkt_sfb_flags &= ~SFB_PKT_PBOX;
		if (sp->sfb_clearpkts > 0) {
			sp->sfb_clearpkts--;
		}
	} else if (sp->sfb_clearpkts > 0) {
		sp->sfb_clearpkts--;
	} else {
		sfb_dq_update_bins(sp, *pkt_sfb_hash, pktsched_get_pkt_len(pkt),
		    &now, qsize(q));
	}

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF) {
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
	}

	/*
	 * If the queue becomes empty before the update interval, reset
	 * the flow control threshold
	 */
	if (qsize(q) == 0) {
		sp->sfb_flags &= ~SFBF_DELAYHIGH;
		sp->sfb_min_qdelay = 0;
		sp->sfb_fc_threshold = 0;
		net_timerclear(&sp->sfb_update_time);
		net_timerclear(&sp->sfb_getqtime);
	}
	return p;
}

void
sfb_getq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt)
{
	sfb_getq_flow(sp, q, 0, FALSE, pkt);
}

void
sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow, u_int32_t *packets,
    u_int32_t *bytes)
{
	u_int32_t cnt = 0, len = 0;
	pktsched_pkt_t pkt;

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	while (sfb_getq_flow(sp, q, flow, TRUE, &pkt) != NULL) {
		cnt++;
		len += pktsched_get_pkt_len(&pkt);
		pktsched_free_pkt(&pkt);
	}

	if (packets != NULL) {
		*packets = cnt;
	}
	if (bytes != NULL) {
		*bytes = len;
	}
}

void
sfb_updateq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH: {
		u_int64_t eff_rate = ifnet_output_linkrate(ifp);

		/* update parameters only if rate has changed */
		if (eff_rate == sp->sfb_eff_rate) {
			break;
		}

		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new "
			    "eff_rate=%llu bps\n", if_name(ifp), sp->sfb_qid,
			    eff_rate);
		}
		sfb_calc_holdtime(sp, eff_rate);
		sfb_calc_pboxtime(sp, eff_rate);
		ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
		sfb_calc_update_interval(sp, eff_rate);
		break;
	}

	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, resetting due to "
			    "link %s\n", if_name(ifp), sp->sfb_qid,
			    (ev == CLASSQ_EV_LINK_UP) ? "UP" : "DOWN");
		}
		sfb_resetq(sp, ev);
		break;

	case CLASSQ_EV_LINK_LATENCY:
	case CLASSQ_EV_LINK_MTU:
	default:
		break;
	}
}

int
sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on)
{
#pragma unused(q)
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) ||
	    (!on && !(sp->sfb_flags & SFBF_SUSPENDED))) {
		return 0;
	}

	if (!(sp->sfb_flags & SFBF_FLOWCTL)) {
		log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since "
		    "flow-control is not enabled", if_name(ifp), sp->sfb_qid,
		    (on ? "suspend" : "resume"));
		return ENOTSUP;
	}

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, setting state to %s",
		    if_name(ifp), sp->sfb_qid, (on ? "SUSPENDED" : "RUNNING"));
	}

	if (on) {
		sp->sfb_flags |= SFBF_SUSPENDED;
	} else {
		sp->sfb_flags &= ~SFBF_SUSPENDED;
		sfb_swap_bins(sp, qlen(q));
	}

	return 0;
}