/*
 * Copyright (c) 2011-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <kern/zalloc.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/flowadv.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
#include <dev/random/randomdev.h>
/*
 * Stochastic Fair Blue
 *
 * Wu-chang Feng, Dilip D. Kandlur, Debanjan Saha, Kang G. Shin
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 * Based on the NS code with the following parameters:
 *
 *   hold-time: 10ms-50ms (randomized)
 *   pbox-time: 50-100ms (randomized)
 *   hinterval: 11-23 (randomized)
 *
 * This implementation uses L = 2 and N = 32 for 2 sets of:
 *
 *	B[L][N]: L x N array of bins (L levels, N bins per level)
 *
 * Each set effectively creates 32^2 virtual buckets (bin combinations)
 * while using only O(32*2) states.
 *
 * Given a 32-bit hash value, we divide it such that octets [0,1,2,3] are
 * used as index for the bins across the 2 levels, where level 1 uses [0,2]
 * and level 2 uses [1,3].  The 2 values per level correspond to the indices
 * for the current and warm-up sets (section 4.4 in the SFB paper regarding
 * Moving Hash Functions explains the purposes of these 2 sets.)
 */
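/*
 * For example, with SFB_LEVELS == 2 the 32-bit flow hash is viewed as 4
 * octets and each octet is masked down to a bin index; for set s:
 *
 *	level 0 bin = SFB_BINMASK(pkt_sfb_hash8[(s << 1)])
 *	level 1 bin = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1])
 *
 * so set 0 consumes octets [0,1] and set 1 consumes octets [2,3], which is
 * how sfb_pcheck() and the bin-update routines below index the bins.
 */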
/*
 * Use Murmur3A_x86_32 for hash function.  It seems to perform consistently
 * across platforms for 1-word key (32-bit flowhash value).  See flowhash.h
 * for other alternatives.  We only need 16-bit hash output.
 */
#define SFB_HASH	net_flowhash_mh3_x86_32
#define SFB_HASHMASK	HASHMASK(16)

#define SFB_BINMASK(_x) \
	((_x) & HASHMASK(SFB_BINS_SHIFT))

#define SFB_BINST(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].stats[_l][_n])

#define SFB_BINFT(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n])

#define SFB_FC_LIST(_sp, _n) \
	(&(*(_sp)->sfb_fc_lists)[_n])
/*
 * The holdtime parameter determines the minimum time interval between
 * two successive updates of the marking probability.  In the event the
 * uplink speed is not known, a default value is chosen and is randomized
 * to be within the following range.
 */
#define HOLDTIME_BASE	(100ULL * 1000 * 1000)	/* 100ms */
#define HOLDTIME_MIN	(10ULL * 1000 * 1000)	/* 10ms */
#define HOLDTIME_MAX	(100ULL * 1000 * 1000)	/* 100ms */
/*
 * The pboxtime parameter determines the bandwidth allocated for rogue
 * flows, i.e. the rate limiting bandwidth.  In the event the uplink speed
 * is not known, a default value is chosen and is randomized to be within
 * the following range.
 */
#define PBOXTIME_BASE	(300ULL * 1000 * 1000)	/* 300ms */
#define PBOXTIME_MIN	(30ULL * 1000 * 1000)	/* 30ms */
#define PBOXTIME_MAX	(300ULL * 1000 * 1000)	/* 300ms */
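/*
 * As a sketch of how the two defaults above get applied when the uplink
 * speed is unknown: sfb_calc_holdtime() and sfb_calc_pboxtime() below fall
 * back to a randomized value via SFB_RANDOM(), i.e. (sfb_random(sp) % MAX)
 * + MIN:
 *
 *	holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
 *	pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
 */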
/*
 * Target queueing delay is the amount of extra delay that can be added
 * to accommodate variations in the link bandwidth.  The queue should be
 * large enough to induce this much delay and nothing more than that.
 */
#define TARGET_QDELAY_BASE	(10ULL * 1000 * 1000)		/* 10ms */
#define TARGET_QDELAY_MIN	(10ULL * 1000)			/* 10us */
#define TARGET_QDELAY_MAX	(20ULL * 1000 * 1000 * 1000)	/* 20s */
/*
 * Update interval for checking the extra delay added by the queue.  This
 * should be the 90-95th percentile of RTT experienced by any TCP connection,
 * so that it will take care of burst traffic.
 */
#define UPDATE_INTERVAL_BASE	(100ULL * 1000 * 1000)		/* 100ms */
#define UPDATE_INTERVAL_MIN	(100ULL * 1000 * 1000)		/* 100ms */
#define UPDATE_INTERVAL_MAX	(10ULL * 1000 * 1000 * 1000)	/* 10s */
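/*
 * As a sketch of how the two parameters above interact in the delay-based
 * mode: sfb_getq_flow() tracks the minimum queueing delay observed during
 * each update interval and, once the interval expires, compares it against
 * the target:
 *
 *	if (sp->sfb_min_qdelay > sp->sfb_target_qdelay)
 *		SFB_SET_DELAY_HIGH(sp, q);   (start flow-controlling bins)
 *	else
 *		clear SFBF_DELAYHIGH and reset sfb_fc_threshold
 *
 * See the update-interval check near the end of sfb_getq_flow() below.
 */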
#define SFB_RANDOM(sp, tmin, tmax)	((sfb_random(sp) % (tmax)) + (tmin))

#define SFB_PKT_PBOX	0x1	/* in penalty box */

/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define SFB_MAX_PMARK	(1 << SFB_FP_SHIFT)	/* Q14 representation of 1.00 */
/*
 * These are d1 (increment) and d2 (decrement) parameters, used to determine
 * the amount by which the marking probability is incremented when the queue
 * overflows, or is decremented when the link is idle.  d1 is set higher than
 * d2, because link underutilization can occur when congestion management is
 * either too conservative or too aggressive, but packet loss occurs only
 * when congestion management is too conservative.  By weighing heavily
 * against packet loss, it can quickly react to a substantial increase in
 * traffic load.
 */
#define SFB_INCREMENT	82	/* Q14 representation of 0.005 */
#define SFB_DECREMENT	16	/* Q14 representation of 0.001 */

#define SFB_PMARK_TH	16056	/* Q14 representation of 0.98 */
#define SFB_PMARK_WARM	3276	/* Q14 representation of 0.2 */
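/*
 * The Q14 constants above are the real-valued parameters scaled by
 * 2^SFB_FP_SHIFT (2^14 = 16384) and truncated, e.g.:
 *
 *	0.005 * 16384 =    81.92  -> 82     (SFB_INCREMENT)
 *	0.001 * 16384 =    16.38  -> 16     (SFB_DECREMENT)
 *	0.98  * 16384 = 16056.32  -> 16056  (SFB_PMARK_TH)
 *	0.2   * 16384 =  3276.8   -> 3276   (SFB_PMARK_WARM)
 */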
#define SFB_PMARK_INC(_bin) do {					\
	(_bin)->pmark += sfb_increment;					\
	if ((_bin)->pmark > SFB_MAX_PMARK)				\
		(_bin)->pmark = SFB_MAX_PMARK;				\
} while (0)

#define SFB_PMARK_DEC(_bin) do {					\
	if ((_bin)->pmark > 0) {					\
		(_bin)->pmark -= sfb_decrement;				\
		if ((_bin)->pmark < 0)					\
			(_bin)->pmark = 0;				\
	}								\
} while (0)
/* Minimum number of bytes in queue to get flow controlled */
#define SFB_MIN_FC_THRESHOLD_BYTES	7500

#define SFB_SET_DELAY_HIGH(_sp_, _q_) do {				\
	(_sp_)->sfb_flags |= SFBF_DELAYHIGH;				\
	(_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES,	\
	    (qsize((_q_)) >> 3));					\
} while (0)
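/*
 * For example, the flow-control threshold is 1/8 of the current queue size
 * in bytes, floored at SFB_MIN_FC_THRESHOLD_BYTES: a queue holding 64KB
 * yields 65536 >> 3 = 8192 bytes, while a 32KB queue yields 4096, so the
 * 7500-byte floor applies.
 */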
#define SFB_QUEUE_DELAYBASED(_sp_)	((_sp_)->sfb_flags & SFBF_DELAYBASED)
#define SFB_IS_DELAYHIGH(_sp_)		((_sp_)->sfb_flags & SFBF_DELAYHIGH)
#define SFB_QUEUE_DELAYBASED_MAXSIZE	2048	/* max pkts */

#define HINTERVAL_MIN	(10)	/* 10 seconds */
#define HINTERVAL_MAX	(20)	/* 20 seconds */
#define SFB_HINTERVAL(sp)	((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN)
#define DEQUEUE_DECAY	7	/* ilog2 of EWMA decay rate, (128) */
#define DEQUEUE_SPIKE(_new, _old)	\
	((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11))

#define ABS(v)	(((v) > 0) ? (v) : -(v))
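/*
 * With DEQUEUE_DECAY = 7 the dequeue-time EWMA computed in sfb_getq_flow()
 * below,
 *
 *	avg = (((avg << decay) - avg) + new) >> decay;
 *
 * is equivalent to avg += (new - avg) / 128, i.e. each inter-dequeue sample
 * contributes 1/128 of its difference from the running average.
 * DEQUEUE_SPIKE() flags samples more than 2048 times (1 << 11) the current
 * average, so the weighting can be adjusted for such spikes.
 */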
#define SFB_ZONE_MAX		32	/* maximum elements in zone */
#define SFB_ZONE_NAME		"classq_sfb"	/* zone name */

#define SFB_BINS_ZONE_MAX	32	/* maximum elements in zone */
#define SFB_BINS_ZONE_NAME	"classq_sfb_bins"	/* zone name */

#define SFB_FCL_ZONE_MAX	32	/* maximum elements in zone */
#define SFB_FCL_ZONE_NAME	"classq_sfb_fcl"	/* zone name */

/* Place the flow control entries in current bin on level 0 */
#define SFB_FC_LEVEL	0
static unsigned int sfb_size;		/* size of zone element */
static struct zone *sfb_zone;		/* zone for sfb */

static unsigned int sfb_bins_size;	/* size of zone element */
static struct zone *sfb_bins_zone;	/* zone for sfb_bins */

static unsigned int sfb_fcl_size;	/* size of zone element */
static struct zone *sfb_fcl_zone;	/* zone for sfb_fc_lists */
/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static void *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t, boolean_t,
    pktsched_pkt_t *);
static void sfb_resetq(struct sfb *, cqev_t);
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
static inline int sfb_pcheck(struct sfb *, uint32_t);
static int sfb_penalize(struct sfb *, uint32_t, uint32_t *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static inline void sfb_dq_update_bins(struct sfb *, uint32_t, uint32_t,
    struct timespec *, u_int32_t qsize);
static inline void sfb_eq_update_bins(struct sfb *, uint32_t, uint32_t);
static int sfb_drop_early(struct sfb *, uint32_t, u_int16_t *,
    struct timespec *);
static boolean_t sfb_bin_addfcentry(struct sfb *, pktsched_pkt_t *,
    uint32_t, uint8_t, uint32_t);
static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
    struct timespec *);

SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "SFB");
static u_int64_t sfb_holdtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_holdtime, "SFB freeze time in nanoseconds");

static u_int64_t sfb_pboxtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_pboxtime, "SFB penalty box time in nanoseconds");

static u_int64_t sfb_hinterval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_hinterval, "SFB hash interval in nanoseconds");

static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_increment, SFB_INCREMENT, "SFB increment [d1]");

static u_int32_t sfb_decrement = SFB_DECREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]");

static u_int32_t sfb_allocation = 0;	/* 0 means "automatic" */
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_allocation, 0, "SFB bin allocation");

static u_int32_t sfb_ratelimit = 0;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sfb_ratelimit, 0, "SFB rate limit");
#define KBPS	(1ULL * 1000)		/* 1 Kbits per second */
#define MBPS	(1ULL * 1000 * 1000)	/* 1 Mbits per second */
#define GBPS	(MBPS * 1000)		/* 1 Gbits per second */

struct sfb_time_tbl {
	u_int64_t	speed;		/* uplink speed */
	u_int64_t	holdtime;	/* hold time */
	u_int64_t	pboxtime;	/* penalty box time */
};

static struct sfb_time_tbl sfb_ttbl[] = {
	{   1 * MBPS,	HOLDTIME_BASE * 1000,	PBOXTIME_BASE * 1000	},
	{  10 * MBPS,	HOLDTIME_BASE * 100,	PBOXTIME_BASE * 100	},
	{ 100 * MBPS,	HOLDTIME_BASE * 10,	PBOXTIME_BASE * 10	},
	{   1 * GBPS,	HOLDTIME_BASE,		PBOXTIME_BASE		},
	{  10 * GBPS,	HOLDTIME_BASE / 10,	PBOXTIME_BASE / 10	},
	{ 100 * GBPS,	HOLDTIME_BASE / 100,	PBOXTIME_BASE / 100	},
	{ 0, 0, 0 }
};
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof(struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof(*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof(*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
sfb_random(struct sfb *sp)
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	return RandomULong();
sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw)
	u_int64_t holdtime;

	if (sfb_holdtime != 0) {
		holdtime = sfb_holdtime;
	} else if (outbw == 0) {
		holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
	} else {
		u_int64_t n, i;

		n = sfb_ttbl[0].holdtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed) {
				break;
			}
			n = sfb_ttbl[i].holdtime;
		}
		holdtime = n;
	}
	net_nsectimer(&holdtime, &sp->sfb_holdtime);
sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw)
	u_int64_t pboxtime;

	if (sfb_pboxtime != 0) {
		pboxtime = sfb_pboxtime;
	} else if (outbw == 0) {
		pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
	} else {
		u_int64_t n, i;

		n = sfb_ttbl[0].pboxtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed) {
				break;
			}
			n = sfb_ttbl[i].pboxtime;
		}
		pboxtime = n;
	}
	net_nsectimer(&pboxtime, &sp->sfb_pboxtime);
	net_timerclear(&sp->sfb_pboxfreeze);
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
	u_int64_t hinterval = 0;

	/*
	 * TODO adi@apple.com: use dq_avg to derive hinterval.
	 */

	if (sfb_hinterval != 0) {
		hinterval = sfb_hinterval;
	} else if (t == NULL || hinterval == 0) {
		hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);
	}

	net_nsectimer(&hinterval, &sp->sfb_hinterval);
	net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
#pragma unused(out_bw)
	u_int64_t update_interval = 0;
	ifclassq_calc_update_interval(&update_interval);
	net_nsectimer(&update_interval, &sp->sfb_update_interval);
/*
 * sfb support routines
 */
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
	VERIFY(ifp != NULL && qlim > 0);

	sp = zalloc(sfb_zone);
	if (sp == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
		return (NULL);
	}

	if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
	bzero(sp->sfb_bins, sfb_bins_size);

	if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
		    if_name(ifp));
	bzero(sp->sfb_fc_lists, sfb_fcl_size);

	for (i = 0; i < SFB_BINS; ++i) {
		STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);

	sp->sfb_flags = (flags & SFBF_USERFLAGS);
	if (sp->sfb_flags & SFBF_ECN) {
		sp->sfb_flags &= ~SFBF_ECN;
		log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
		    "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);

	sfb_resetq(sp, CLASSQ_EV_INIT);
sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
	sp->sfb_stats.flow_feedback += fcl->cnt;

	flowadv_add(&fcl->fclist);
	VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
sfb_fclists_clean(struct sfb *sp)
	/* Move all the flow control entries to the flowadv list */
	for (i = 0; i < SFB_BINS; ++i) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
		if (!STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
sfb_destroy(struct sfb *sp)
	sfb_fclists_clean(sp);
	if (sp->sfb_bins != NULL) {
		zfree(sfb_bins_zone, sp->sfb_bins);
	if (sp->sfb_fc_lists != NULL) {
		zfree(sfb_fcl_zone, sp->sfb_fc_lists);
		sp->sfb_fc_lists = NULL;
sfb_resetq(struct sfb *sp, cqev_t ev)
	struct ifnet *ifp = sp->sfb_ifp;

	if (ev != CLASSQ_EV_LINK_DOWN) {
		(*sp->sfb_bins)[0].fudge = sfb_random(sp);
		(*sp->sfb_bins)[1].fudge = sfb_random(sp);
		sp->sfb_allocation = ((sfb_allocation == 0) ?
		    (sp->sfb_qlim / 3) : sfb_allocation);
		sp->sfb_drop_thresh = sp->sfb_allocation +
		    (sp->sfb_allocation >> 1);
	}

	sp->sfb_clearpkts = 0;

	eff_rate = ifnet_output_linkrate(ifp);
	sp->sfb_eff_rate = eff_rate;

	sfb_calc_holdtime(sp, eff_rate);
	sfb_calc_pboxtime(sp, eff_rate);
	sfb_calc_hinterval(sp, NULL);
	ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
	sfb_calc_update_interval(sp, eff_rate);

	if (ev == CLASSQ_EV_LINK_DOWN ||
	    ev == CLASSQ_EV_LINK_UP) {
		sfb_fclists_clean(sp);
	}

	bzero(sp->sfb_bins, sizeof(*sp->sfb_bins));
	bzero(&sp->sfb_stats, sizeof(sp->sfb_stats));

	if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose) {
		return;
	}

	log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
	    "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
	    "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps"
	    "target_qdelay= %llu nsec "
	    "update_interval=%llu sec %llu nsec flags=0x%x\n",
	    if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
	    (u_int64_t)sp->sfb_pboxtime.tv_nsec,
	    (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
	    (int)sp->sfb_hinterval.tv_sec, (int)sizeof(*sp->sfb_bins),
	    eff_rate, (u_int64_t)sp->sfb_target_qdelay,
	    (u_int64_t)sp->sfb_update_interval.tv_sec,
	    (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
sfb_getstats(struct sfb *sp, struct sfb_stats *sps)
	sps->allocation = sp->sfb_allocation;
	sps->dropthresh = sp->sfb_drop_thresh;
	sps->clearpkts = sp->sfb_clearpkts;
	sps->current = sp->sfb_current;
	sps->target_qdelay = sp->sfb_target_qdelay;
	sps->min_estdelay = sp->sfb_min_qdelay;
	sps->delay_fcthreshold = sp->sfb_fc_threshold;
	sps->flags = sp->sfb_flags;

	net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
	net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
	net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
	net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
	*(&(sps->sfbstats)) = *(&(sp->sfb_stats));

	_CASSERT(sizeof((*sp->sfb_bins)[0].stats) ==
	    sizeof(sps->binstats[0].stats));

	bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats,
	    sizeof(sps->binstats[0].stats));
	bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats,
	    sizeof(sps->binstats[1].stats));
sfb_swap_bins(struct sfb *sp, u_int32_t len)
	if (sp->sfb_flags & SFBF_SUSPENDED) {
		return;
	}

	VERIFY((s + (s ^ 1)) == 1);

	(*sp->sfb_bins)[s].fudge = sfb_random(sp); /* recompute perturbation */
	sp->sfb_clearpkts = len;
	sp->sfb_stats.num_rehash++;

	s = (sp->sfb_current ^= 1);	/* flip the bit (swap current) */

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, "
		    "qlen=%d\n", if_name(sp->sfb_ifp), sp->sfb_qid, s, len);
	}

	/* clear freezetime for all current bins */
	bzero(&(*sp->sfb_bins)[s].freezetime,
	    sizeof((*sp->sfb_bins)[s].freezetime));

	/* clear/adjust bin statistics and flow control lists */
	for (i = 0; i < SFB_BINS; i++) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);

		if (!STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
		}

		for (j = 0; j < SFB_LEVELS; j++) {
			struct sfbbinstats *cbin, *wbin;

			cbin = SFB_BINST(sp, j, i, s);		/* current */
			wbin = SFB_BINST(sp, j, i, s ^ 1);	/* warm-up */

			if (cbin->pmark > SFB_MAX_PMARK) {
				cbin->pmark = SFB_MAX_PMARK;
			if (cbin->pmark < 0) {

			/*
			 * Keep pmark from before to identify
			 * non-responsives immediately.
			 */
			if (wbin->pmark > SFB_PMARK_WARM) {
				wbin->pmark = SFB_PMARK_WARM;
sfb_pcheck(struct sfb *sp, uint32_t pkt_sfb_hash)
#endif /* SFB_LEVELS != 2 */
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * For current bins, returns 1 if all pmark >= SFB_PMARK_TH,
	 * 0 otherwise; optimize for SFB_LEVELS=2.
	 */

	/*
	 * Level 0: bin index at [0] for set 0; [2] for set 1
	 * Level 1: bin index at [1] for set 0; [3] for set 1
	 */
	if (SFB_BINST(sp, 0, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]),
	    s)->pmark < SFB_PMARK_TH ||
	    SFB_BINST(sp, 1, SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]),
	    s)->pmark < SFB_PMARK_TH) {
		return (0);
	}
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {	/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {	/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH) {
			return (0);
		}
	}
#endif /* SFB_LEVELS != 2 */

	return (1);
sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags,
    struct timespec *now)
	struct timespec delta = { 0, 0 };
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	/* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
	if (!sfb_ratelimit || !sfb_pcheck(sp, pkt_sfb_hash)) {
		return (0);
	}

	net_timersub(now, &sp->sfb_pboxfreeze, &delta);
	if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) {
#endif /* SFB_LEVELS != 2 */
		struct sfbbinstats *bin;

		w = sp->sfb_current ^ 1;
		VERIFY((w + (w ^ 1)) == 1);

		/*
		 * Update warm-up bins; optimize for SFB_LEVELS=2
		 */
		/* Level 0: bin index at [0] for set 0; [2] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1)]);
		bin = SFB_BINST(sp, 0, n, w);
		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);
		}

		/* Level 1: bin index at [1] for set 0; [3] for set 1 */
		n = SFB_BINMASK(pkt_sfb_hash8[(w << 1) + 1]);
		bin = SFB_BINST(sp, 1, n, w);
		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
		}
#else /* SFB_LEVELS != 2 */
		for (i = 0; i < SFB_LEVELS; i++) {
			if (w == 0) {	/* set 0, bin index [0,1] */
				n = SFB_BINMASK(pkt_sfb_hash8[i]);
			} else {	/* set 1, bin index [2,3] */
				n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
			}

			bin = SFB_BINST(sp, i, n, w);
			if (bin->pkts >= sp->sfb_allocation) {
				sfb_increment_bin(sp, bin,
				    SFB_BINFT(sp, i, n, w), now);
			}
		}
#endif /* SFB_LEVELS != 2 */
		return (1);
	}

	/* non-conformant or else misclassified flow; queue it anyway */
	*pkt_sfb_flags |= SFB_PKT_PBOX;
	*(&sp->sfb_pboxfreeze) = *now;

	return (0);
sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now, boolean_t inc)
	struct timespec delta;

	net_timersub(now, ft, &delta);
	if (net_timercmp(&delta, &sp->sfb_holdtime, <)) {
		if (classq_verbose > 1) {
			log(LOG_DEBUG, "%s: SFB qid=%d, %s update frozen "
			    "(delta=%llu nsec)\n", if_name(sp->sfb_ifp),
			    sp->sfb_qid, inc ? "increment" : "decrement",
			    (u_int64_t)delta.tv_nsec);
		}
		return;
	}

	/* increment/decrement marking probability */
	*ft = *now;
	if (inc) {
		SFB_PMARK_INC(bin);
	} else {
		SFB_PMARK_DEC(bin);
	}
sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
	return sfb_adjust_bin(sp, bin, ft, now, FALSE);

sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
	return sfb_adjust_bin(sp, bin, ft, now, TRUE);
sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len,
    struct timespec *now, u_int32_t qsize)
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	struct sfbbinstats *bin;
	struct sfb_fcl *fcl = NULL;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2 and SFB_FC_LEVEL=0
	 */
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
	bin->bytes -= pkt_len;

	if (bin->pkts == 0) {
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
	}

	/* Deliver flow control feedback to the sockets */
	if (SFB_QUEUE_DELAYBASED(sp)) {
		if (!(SFB_IS_DELAYHIGH(sp)) ||
		    bin->bytes <= sp->sfb_fc_threshold ||
		    bin->pkts == 0 || qsize == 0) {
			fcl = SFB_FC_LIST(sp, n);
	} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
		fcl = SFB_FC_LIST(sp, n);
	}

	if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) {
		sfb_fclist_append(sp, fcl);
	}

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt_len);
	bin->bytes -= pkt_len;
	if (bin->pkts == 0) {
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
	}
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {	/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {	/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);

		VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len);
		bin->bytes -= pkt_len;
		if (bin->pkts == 0) {
			sfb_decrement_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		}
		if (i != SFB_FC_LEVEL) {
			continue;
		}
		if (SFB_QUEUE_DELAYBASED(sp)) {
			if (!(SFB_IS_DELAYHIGH(sp)) ||
			    bin->bytes <= sp->sfb_fc_threshold) {
				fcl = SFB_FC_LIST(sp, n);
		} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
			fcl = SFB_FC_LIST(sp, n);
		}
		if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) {
			sfb_fclist_append(sp, fcl);
		}
	}
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
sfb_eq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len)
#endif /* SFB_LEVELS != 2 */
	struct sfbbinstats *bin;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	bin = SFB_BINST(sp, 0,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1)]), s);
	bin->bytes += pkt_len;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	bin = SFB_BINST(sp, 1,
	    SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]), s);
	bin->bytes += pkt_len;
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {	/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {	/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);
		bin->bytes += pkt_len;
	}
#endif /* SFB_LEVELS != 2 */
sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash,
    uint8_t flowsrc, uint32_t flowid)
	struct flowadv_fcentry *fce;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	VERIFY((s + (s ^ 1)) == 1);

	if (flowid == 0) {
		sp->sfb_stats.null_flowid++;
		return (FALSE);
	}

	/*
	 * Use value at index 0 for set 0 and
	 * value at index 2 for set 1
	 */
	fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]));
	STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
		if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
		    fce->fce_flowid == flowid) {
			/* Already on flow control list; just return */
			return (TRUE);
		}
	}

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	fce = pktsched_alloc_fcentry(pkt, sp->sfb_ifp, M_WAITOK);
	if (fce != NULL) {
		STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
		sp->sfb_stats.flow_controlled++;
	}
/*
 * check if this flow needs to be flow-controlled or if this
 * packet needs to be dropped.
 */
sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
	if (SFB_QUEUE_DELAYBASED(sp)) {
		/*
		 * Mark or drop if this bin has more
		 * bytes than the flowcontrol threshold.
		 */
		if (SFB_IS_DELAYHIGH(sp) &&
		    bin->bytes >= (sp->sfb_fc_threshold << 1)) {
			ret = 1;	/* drop or mark */
		}
	} else {
		if (bin->pkts >= sp->sfb_allocation &&
		    bin->pkts >= sp->sfb_drop_thresh) {
			ret = 1;	/* drop or mark */
		}
	}
	return (ret);
/*
 * early-drop probability is kept in pmark of each bin of the flow
 */
sfb_drop_early(struct sfb *sp, uint32_t pkt_sfb_hash, u_int16_t *pmin,
    struct timespec *now)
#endif /* SFB_LEVELS != 2 */
	struct sfbbinstats *bin;
	uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	*pmin = (u_int16_t)-1;

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);
	if (*pmin > (u_int16_t)bin->pmark) {
		*pmin = (u_int16_t)bin->pmark;
	}

	/* Update SFB probability */
	if (bin->pkts >= sp->sfb_allocation) {
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);
	}

	ret = sfb_bin_mark_or_drop(sp, bin);

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);
	if (*pmin > (u_int16_t)bin->pmark) {
		*pmin = (u_int16_t)bin->pmark;
	}
	if (bin->pkts >= sp->sfb_allocation) {
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
	}
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0) {	/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt_sfb_hash8[i]);
		} else {	/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt_sfb_hash8[i + 2]);
		}

		bin = SFB_BINST(sp, i, n, s);
		if (*pmin > (u_int16_t)bin->pmark) {
			*pmin = (u_int16_t)bin->pmark;
		}
		if (bin->pkts >= sp->sfb_allocation) {
			sfb_increment_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		}
		if (i == SFB_FC_LEVEL) {
			ret = sfb_bin_mark_or_drop(sp, bin);
		}
	}
#endif /* SFB_LEVELS != 2 */

	if (sp->sfb_flags & SFBF_SUSPENDED) {
		ret = 1;	/* drop or mark */
	}

	return (ret);
sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
    struct timespec *now)
	struct timespec max_getqtime;

	if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
	    qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
	    !net_timerisset(&sp->sfb_getqtime)) {
		return;
	}

	net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
	    &max_getqtime);
	if (net_timercmp(now, &max_getqtime, >)) {
		/*
		 * No packets have been dequeued in an update interval
		 * worth of time.  It means that the queue is stalled
		 */
		SFB_SET_DELAY_HIGH(sp, q);
		sp->sfb_stats.dequeue_stall++;
	}
#define DTYPE_NODROP	0	/* no drop */
#define DTYPE_FORCED	1	/* a "forced" drop */
#define DTYPE_EARLY	2	/* an "unforced" (early) drop */
sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt,
    struct pf_mtag *t)
#endif /* !PF_ECN */
	struct timespec now;
	int ret = CLASSQEQ_SUCCESS;
	uint32_t maxqsize = 0;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_sfb_hash;
	uint16_t *pkt_sfb_hash16;
	uint32_t *pkt_sfb_flags;
	uint32_t pkt_flowid;
	uint32_t *pkt_flags;
	uint8_t pkt_proto, pkt_flowsrc;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);
	pkt_sfb_hash16 = (uint16_t *)pkt_sfb_hash;

	if (pkt->pktsched_ptype == QP_MBUF) {
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
	}

	if (*pkt_timestamp > 0) {
		net_nsectimer(pkt_timestamp, &now);
	} else {
		nanouptime(&now);
		net_timernsec(&now, pkt_timestamp);
	}

	/* time to swap the bins? */
	if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
		net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
		sfb_swap_bins(sp, qlen(q));
		s = sp->sfb_current;
		VERIFY((s + (s ^ 1)) == 1);
	}

	if (!net_timerisset(&sp->sfb_update_time)) {
		net_timeradd(&now, &sp->sfb_update_interval,
		    &sp->sfb_update_time);
	}

	/*
	 * If getq time is not set because this is the first packet
	 * or after idle time, set it now so that we can detect a stall.
	 */
	if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime)) {
		*(&sp->sfb_getqtime) = *(&now);
	}

	pkt_sfb_hash16[s] =
	    (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid),
	    (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
	pkt_sfb_hash16[s ^ 1] =
	    (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid),
	    (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);

	/* check if the queue has been stalled */
	sfb_detect_dequeue_stall(sp, q, &now);

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (sfb_drop_early(sp, *pkt_sfb_hash, &pmin, &now)) {
		/* flow control, mark or drop by sfb */
		if ((sp->sfb_flags & SFBF_FLOWCTL) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/* drop all during suspension or for non-TCP */
			if ((sp->sfb_flags & SFBF_SUSPENDED) ||
			    pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				sp->sfb_stats.drop_early++;
			}
		}
		/* XXX: only supported for mbuf */
		else if ((sp->sfb_flags & SFBF_ECN) &&
		    (pkt->pktsched_ptype == QP_MBUF) &&
		    (pkt_proto == IPPROTO_TCP) &&	/* only for TCP */
		    ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
		    mark_ecn(m, t, sp->sfb_flags) &&
		    !(sp->sfb_flags & SFBF_SUSPENDED)) {
			/* successfully marked; do not drop. */
			sp->sfb_stats.marked_packets++;
		} else {
			/* unforced drop by sfb */
			droptype = DTYPE_EARLY;
			sp->sfb_stats.drop_early++;
		}
	}

	/* non-responsive flow penalty? */
	if (droptype == DTYPE_NODROP && sfb_penalize(sp, *pkt_sfb_hash,
	    pkt_sfb_flags, &now)) {
		droptype = DTYPE_FORCED;
		sp->sfb_stats.drop_pbox++;
	}

	if (SFB_QUEUE_DELAYBASED(sp)) {
		maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE;
	} else {
		maxqsize = qlimit(q);
	}

	/*
	 * When the queue length hits the queue limit, make it a forced
	 * drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= maxqsize) {
		if (pkt_proto == IPPROTO_TCP &&
		    qlen(q) < (maxqsize + (maxqsize >> 1)) &&
		    ((*pkt_flags & PKTF_TCP_REXMT) ||
		    (sp->sfb_flags & SFBF_LAST_PKT_DROPPED))) {
			/*
			 * At some level, dropping packets will make the
			 * flows backoff and will keep memory requirements
			 * under control.  But we should not cause a tail
			 * drop because it can take a long time for a
			 * TCP flow to recover.  We should try to drop
			 * alternate packets instead.
			 */
			sp->sfb_flags &= ~SFBF_LAST_PKT_DROPPED;
		} else {
			droptype = DTYPE_FORCED;
			sp->sfb_stats.drop_queue++;
			sp->sfb_flags |= SFBF_LAST_PKT_DROPPED;
		}
	}

	if (fc_adv == 1 && droptype != DTYPE_FORCED &&
	    sfb_bin_addfcentry(sp, pkt, *pkt_sfb_hash, pkt_flowsrc,
	    pkt_flowid)) {
		/* deliver flow control advisory error */
		if (droptype == DTYPE_NODROP) {
			ret = CLASSQEQ_SUCCESS_FC;
			VERIFY(!(sp->sfb_flags & SFBF_SUSPENDED));
		} else if (sp->sfb_flags & SFBF_SUSPENDED) {
			/* drop due to suspension */
			ret = CLASSQEQ_DROP_SP;
		} else {
			/* drop due to flow-control */
			ret = CLASSQEQ_DROP_FC;
		}
	}

	/* if successful enqueue this packet, else drop it */
	if (droptype == DTYPE_NODROP) {
		VERIFY(pkt->pktsched_ptype == qptype(q));
		_addq(q, pkt->pktsched_pkt);
	} else {
		IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	if (!(*pkt_sfb_flags & SFB_PKT_PBOX)) {
		sfb_eq_update_bins(sp, *pkt_sfb_hash,
		    pktsched_get_pkt_len(pkt));
	} else {
		sp->sfb_stats.pbox_packets++;
	}

	/* successfully queued */
sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge,
    pktsched_pkt_t *pkt)
	struct timespec now;
	classq_pkt_type_t ptype;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_flags;
	uint32_t *pkt_sfb_flags;
	uint32_t *pkt_sfb_hash;

	if (!purge && (sp->sfb_flags & SFBF_SUSPENDED)) {
		return (NULL);
	}

	/* flow of 0 means head of queue */
	if ((p = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) {
		net_timerclear(&sp->sfb_getqtime);
		return (NULL);
	}

	pktsched_pkt_encap(pkt, ptype, p);
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL,
	    NULL, NULL, NULL);
	pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags);

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF) {
		VERIFY(*pkt_flags & PKTF_PRIV_GUARDED);
	}

	/* calculate EWMA of dequeues */
	if (net_timerisset(&sp->sfb_getqtime)) {
		struct timespec delta;

		net_timersub(&now, &sp->sfb_getqtime, &delta);
		net_timernsec(&delta, &new);
		avg = sp->sfb_stats.dequeue_avg;
		if (avg > 0) {
			int decay = DEQUEUE_DECAY;
			/*
			 * If the time since last dequeue is
			 * significantly greater than the current
			 * average, weigh the average more against
			 * the old value.
			 */
			if (DEQUEUE_SPIKE(new, avg)) {
				decay += 5;
			}
			avg = (((avg << decay) - avg) + new) >> decay;
		} else {
			avg = new;
		}
		sp->sfb_stats.dequeue_avg = avg;
	}
	*(&sp->sfb_getqtime) = *(&now);

	if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
		u_int64_t dequeue_ns, queue_delay = 0;
		net_timernsec(&now, &dequeue_ns);
		if (dequeue_ns > *pkt_timestamp) {
			queue_delay = dequeue_ns - *pkt_timestamp;
		}

		if (sp->sfb_min_qdelay == 0 ||
		    (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay)) {
			sp->sfb_min_qdelay = queue_delay;
		}

		if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
			if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
				if (!SFB_IS_DELAYHIGH(sp)) {
					SFB_SET_DELAY_HIGH(sp, q);
				}
			} else {
				sp->sfb_flags &= ~(SFBF_DELAYHIGH);
				sp->sfb_fc_threshold = 0;
			}

			net_timeradd(&now, &sp->sfb_update_interval,
			    &sp->sfb_update_time);
			sp->sfb_min_qdelay = 0;
		}
	}

	/*
	 * Clearpkts are the ones which were in the queue when the hash
	 * function was perturbed.  Since the perturbation value (fudge),
	 * and thus bin information for these packets is not known, we do
	 * not change accounting information while dequeuing these packets.
	 * It is important not to set the hash interval too small due to
	 * this reason.  A rule of thumb is to set it to K*D, where D is
	 * the time taken to drain queue.
	 */
	if (*pkt_sfb_flags & SFB_PKT_PBOX) {
		*pkt_sfb_flags &= ~SFB_PKT_PBOX;
		if (sp->sfb_clearpkts > 0) {
			sp->sfb_clearpkts--;
		}
	} else if (sp->sfb_clearpkts > 0) {
		sp->sfb_clearpkts--;
	} else {
		sfb_dq_update_bins(sp, *pkt_sfb_hash, pktsched_get_pkt_len(pkt),
		    &now, qsize(q));
	}

	/* See comments in <rdar://problem/14040693> */
	if (ptype == QP_MBUF) {
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
	}

	/*
	 * If the queue becomes empty before the update interval, reset
	 * the flow control threshold
	 */
	if (qsize(q) == 0) {
		sp->sfb_flags &= ~SFBF_DELAYHIGH;
		sp->sfb_min_qdelay = 0;
		sp->sfb_fc_threshold = 0;
		net_timerclear(&sp->sfb_update_time);
		net_timerclear(&sp->sfb_getqtime);
	}
sfb_getq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt)
	sfb_getq_flow(sp, q, 0, FALSE, pkt);
sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow, u_int32_t *packets,
    u_int32_t *bytes)
	u_int32_t cnt = 0, len = 0;

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	while (sfb_getq_flow(sp, q, flow, TRUE, &pkt) != NULL) {
		cnt++;
		len += pktsched_get_pkt_len(&pkt);
		pktsched_free_pkt(&pkt);
	}

	if (packets != NULL) {
		*packets = cnt;
	}
	if (bytes != NULL) {
		*bytes = len;
	}
sfb_updateq(struct sfb *sp, cqev_t ev)
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH: {
		u_int64_t eff_rate = ifnet_output_linkrate(ifp);

		/* update parameters only if rate has changed */
		if (eff_rate == sp->sfb_eff_rate) {
			break;
		}

		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new "
			    "eff_rate=%llu bps\n", if_name(ifp), sp->sfb_qid,
			    eff_rate);
		}
		sfb_calc_holdtime(sp, eff_rate);
		sfb_calc_pboxtime(sp, eff_rate);
		ifclassq_calc_target_qdelay(ifp, &sp->sfb_target_qdelay);
		sfb_calc_update_interval(sp, eff_rate);
		break;
	}

	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, resetting due to "
			    "link %s\n", if_name(ifp), sp->sfb_qid,
			    (ev == CLASSQ_EV_LINK_UP) ? "UP" : "DOWN");
		}
		sfb_resetq(sp, ev);
		break;

	case CLASSQ_EV_LINK_LATENCY:
	case CLASSQ_EV_LINK_MTU:
sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on)
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) ||
	    (!on && !(sp->sfb_flags & SFBF_SUSPENDED))) {
		return (0);
	}

	if (!(sp->sfb_flags & SFBF_FLOWCTL)) {
		log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since "
		    "flow-control is not enabled", if_name(ifp), sp->sfb_qid,
		    (on ? "suspend" : "resume"));
		return (ENOTSUP);
	}

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, setting state to %s",
		    if_name(ifp), sp->sfb_qid, (on ? "SUSPENDED" : "RUNNING"));
	}

	if (on) {
		sp->sfb_flags |= SFBF_SUSPENDED;
	} else {
		sp->sfb_flags &= ~SFBF_SUSPENDED;
		sfb_swap_bins(sp, qlen(q));
	}

	return (0);