/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/flowadv.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <net/classq/classq_sfb.h>
#include <net/flowhash.h>
#include <net/net_osdep.h>
#include <dev/random/randomdev.h>
/*
 * Stochastic Fair Blue
 *
 * Wu-chang Feng, Dilip D. Kandlur, Debanjan Saha, Kang G. Shin
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 * Based on the NS code with the following parameters:
 *
 *	bytes:		false
 *	decrement:	0.001
 *	increment:	0.005
 *	hold-time:	10ms-50ms (randomized)
 *	pbox-time:	50-100ms (randomized)
 *	hinterval:	11-23 (randomized)
 *
 * This implementation uses L = 2 and N = 32 for 2 sets of:
 *
 *	B[L][N]: L x N array of bins (L levels, N bins per level)
 *
 * Each set effectively creates 32^2 virtual buckets (bin combinations)
 * while using only O(32*2) states.
 *
 * Given a 32-bit hash value, we divide it such that octets [0,1,2,3] are
 * used as index for the bins across the 2 levels, where level 1 uses [0,2]
 * and level 2 uses [1,3].  The 2 values per level correspond to the indices
 * for the current and warm-up sets (section 4.4 in the SFB paper regarding
 * Moving Hash Functions explains the purposes of these 2 sets.)
 */
/*
 * Use Murmur3A_x86_32 for hash function.  It seems to perform consistently
 * across platforms for 1-word key (32-bit flowhash value).  See flowhash.h
 * for other alternatives.  We only need 16-bit hash output.
 */
#define	SFB_HASH	net_flowhash_mh3_x86_32
#define	SFB_HASHMASK	HASHMASK(16)
#define	SFB_BINMASK(_x) \
	((_x) & HASHMASK(SFB_BINS_SHIFT))

#define	SFB_BINST(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].stats[_l][_n])

#define	SFB_BINFT(_sp, _l, _n, _c) \
	(&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n])

#define	SFB_FC_LIST(_sp, _n) \
	(&(*(_sp)->sfb_fc_lists)[_n])
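
/*
 * Illustrative sketch, not part of the original code: how one set's
 * 16-bit flow hash is derived from the flow ID and that set's
 * perturbation ("fudge") value, and how the two per-level bin indices
 * are then carved out of its two octets (low octet for level 0, high
 * octet for level 1, per the octet layout described above).
 */
static inline void
sfb_example_hash_to_bins(u_int32_t flowid, u_int32_t fudge,
    u_int32_t *n0, u_int32_t *n1)
{
	u_int32_t h = SFB_HASH(&flowid, sizeof (flowid), fudge) &
	    SFB_HASHMASK;

	*n0 = SFB_BINMASK(h);		/* level 0 bin index */
	*n1 = SFB_BINMASK(h >> 8);	/* level 1 bin index */
}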
/*
 * The holdtime parameter determines the minimum time interval between
 * two successive updates of the marking probability.  In the event the
 * uplink speed is not known, a default value is chosen and is randomized
 * to be within the following range.
 */
#define	HOLDTIME_BASE	(100ULL * 1000 * 1000)	/* 100ms */
#define	HOLDTIME_MIN	(10ULL * 1000 * 1000)	/* 10ms */
#define	HOLDTIME_MAX	(100ULL * 1000 * 1000)	/* 100ms */
/*
 * The pboxtime parameter determines the bandwidth allocated for rogue
 * flows, i.e. the rate limiting bandwidth.  In the event the uplink speed
 * is not known, a default value is chosen and is randomized to be within
 * the following range.
 */
#define	PBOXTIME_BASE	(300ULL * 1000 * 1000)	/* 300ms */
#define	PBOXTIME_MIN	(30ULL * 1000 * 1000)	/* 30ms */
#define	PBOXTIME_MAX	(300ULL * 1000 * 1000)	/* 300ms */
/*
 * Target queueing delay is the amount of extra delay that can be added
 * to accommodate variations in the link bandwidth.  The queue should be
 * large enough to induce this much delay and nothing more than that.
 */
#define	TARGET_QDELAY_BASE	(10ULL * 1000 * 1000)		/* 10ms */
#define	TARGET_QDELAY_MIN	(10ULL * 1000)			/* 10us */
#define	TARGET_QDELAY_MAX	(20ULL * 1000 * 1000 * 1000)	/* 20s */
/*
 * Update interval for checking the extra delay added by the queue.  This
 * should be the 90th-95th percentile of RTT experienced by any TCP
 * connection, so that it will take care of burst traffic.
 */
#define	UPDATE_INTERVAL_BASE	(100ULL * 1000 * 1000)		/* 100ms */
#define	UPDATE_INTERVAL_MIN	(100ULL * 1000 * 1000)		/* 100ms */
#define	UPDATE_INTERVAL_MAX	(10ULL * 1000 * 1000 * 1000)	/* 10s */
#define	SFB_RANDOM(sp, tmin, tmax)	((sfb_random(sp) % (tmax)) + (tmin))

#define	SFB_PKT_PBOX	0x1	/* in penalty box */
/* The following mantissa values are in SFB_FP_SHIFT Q format */
#define	SFB_MAX_PMARK	(1 << SFB_FP_SHIFT)	/* Q14 representation of 1.00 */

/*
 * These are d1 (increment) and d2 (decrement) parameters, used to determine
 * the amount by which the marking probability is incremented when the queue
 * overflows, or is decremented when the link is idle.  d1 is set higher than
 * d2, because link underutilization can occur when congestion management is
 * either too conservative or too aggressive, but packet loss occurs only
 * when congestion management is too conservative.  By weighing heavily
 * against packet loss, it can quickly react to a substantial increase in
 * traffic load.
 */
#define	SFB_INCREMENT	82	/* Q14 representation of 0.005 */
#define	SFB_DECREMENT	16	/* Q14 representation of 0.001 */

#define	SFB_PMARK_TH	16056	/* Q14 representation of 0.98 */
#define	SFB_PMARK_WARM	3276	/* Q14 representation of 0.2 */
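
/*
 * Worked example, not part of the original code: a probability p is
 * stored as p * 2^SFB_FP_SHIFT, so 0.98 becomes 16056 (SFB_PMARK_TH)
 * and 0.005 rounds to 82 (SFB_INCREMENT).  The helper below shows the
 * integer-only conversion from a per-mille value.
 */
static inline int32_t
sfb_example_permille_to_q14(int32_t permille)
{
	/* e.g. 980 (i.e. 0.98) -> (980 << SFB_FP_SHIFT) / 1000 == 16056 */
	return ((permille << SFB_FP_SHIFT) / 1000);
}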
#define	SFB_PMARK_INC(_bin) do {					\
	(_bin)->pmark += sfb_increment;					\
	if ((_bin)->pmark > SFB_MAX_PMARK)				\
		(_bin)->pmark = SFB_MAX_PMARK;				\
} while (0)

#define	SFB_PMARK_DEC(_bin) do {					\
	if ((_bin)->pmark > 0) {					\
		(_bin)->pmark -= sfb_decrement;				\
		if ((_bin)->pmark < 0)					\
			(_bin)->pmark = 0;				\
	}								\
} while (0)
/* Minimum number of bytes in queue to get flow controlled */
#define	SFB_MIN_FC_THRESHOLD_BYTES	7500

#define	SFB_SET_DELAY_HIGH(_sp_, _q_) do {				\
	(_sp_)->sfb_flags |= SFBF_DELAYHIGH;				\
	(_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES,	\
	    (qsize((_q_)) >> 3));					\
} while (0)

#define	SFB_QUEUE_DELAYBASED(_sp_)	((_sp_)->sfb_flags & SFBF_DELAYBASED)
#define	SFB_IS_DELAYHIGH(_sp_)		((_sp_)->sfb_flags & SFBF_DELAYHIGH)
#define	SFB_QUEUE_DELAYBASED_MAXSIZE	2048	/* max pkts */
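
/*
 * Illustrative sketch, not part of the original code: the byte threshold
 * picked by SFB_SET_DELAY_HIGH is one eighth of the current queue size,
 * floored at SFB_MIN_FC_THRESHOLD_BYTES.
 */
static inline u_int32_t
sfb_example_fc_threshold(u_int32_t qsize_bytes)
{
	return (max(SFB_MIN_FC_THRESHOLD_BYTES, qsize_bytes >> 3));
}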
#define	HINTERVAL_MIN	(10)	/* 10 seconds */
#define	HINTERVAL_MAX	(20)	/* 20 seconds */
#define	SFB_HINTERVAL(sp) ((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN)

#define	DEQUEUE_DECAY	7		/* ilog2 of EWMA decay rate, (128) */
#define	DEQUEUE_SPIKE(_new, _old)	\
	((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11))

#define	ABS(v)	(((v) > 0) ? (v) : -(v))
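
/*
 * Illustrative sketch, not part of the original code: the dequeue-rate
 * EWMA computed in sfb_getq_flow() below, where the new sample carries
 * a weight of 1/2^decay (1/128 at DEQUEUE_DECAY; the decay is raised
 * further when DEQUEUE_SPIKE detects an outlier sample).
 */
static inline u_int64_t
sfb_example_ewma(u_int64_t avg, u_int64_t new, int decay)
{
	if (avg == 0)
		return (new);	/* no history yet; seed with the sample */
	return ((((avg << decay) - avg) + new) >> decay);
}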
#define	SFB_ZONE_MAX		32		/* maximum elements in zone */
#define	SFB_ZONE_NAME		"classq_sfb"	/* zone name */

#define	SFB_BINS_ZONE_MAX	32		/* maximum elements in zone */
#define	SFB_BINS_ZONE_NAME	"classq_sfb_bins" /* zone name */

#define	SFB_FCL_ZONE_MAX	32		/* maximum elements in zone */
#define	SFB_FCL_ZONE_NAME	"classq_sfb_fcl" /* zone name */

/* Place the flow control entries in current bin on level 0 */
#define	SFB_FC_LEVEL	0
/* Store SFB hash and flags in the module private scratch space */
#define	pkt_sfb_hash8	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val8
#define	pkt_sfb_hash16	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16
#define	pkt_sfb_hash32	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32
#define	pkt_sfb_flags	pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32
static unsigned int sfb_size;		/* size of zone element */
static struct zone *sfb_zone;		/* zone for sfb */

static unsigned int sfb_bins_size;	/* size of zone element */
static struct zone *sfb_bins_zone;	/* zone for sfb_bins */

static unsigned int sfb_fcl_size;	/* size of zone element */
static struct zone *sfb_fcl_zone;	/* zone for sfb_fc_lists */
/* internal function prototypes */
static u_int32_t sfb_random(struct sfb *);
static struct mbuf *sfb_getq_flow(struct sfb *, class_queue_t *, u_int32_t,
    boolean_t);
static void sfb_resetq(struct sfb *, cqev_t);
static void sfb_calc_holdtime(struct sfb *, u_int64_t);
static void sfb_calc_pboxtime(struct sfb *, u_int64_t);
static void sfb_calc_hinterval(struct sfb *, u_int64_t *);
static void sfb_calc_target_qdelay(struct sfb *, u_int64_t);
static void sfb_calc_update_interval(struct sfb *, u_int64_t);
static void sfb_swap_bins(struct sfb *, u_int32_t);
static inline int sfb_pcheck(struct sfb *, struct pkthdr *);
static int sfb_penalize(struct sfb *, struct pkthdr *, struct timespec *);
static void sfb_adjust_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *, boolean_t);
static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static void sfb_increment_bin(struct sfb *, struct sfbbinstats *,
    struct timespec *, struct timespec *);
static inline void sfb_dq_update_bins(struct sfb *, struct pkthdr *,
    struct timespec *, u_int32_t qsize);
static inline void sfb_eq_update_bins(struct sfb *, struct pkthdr *);
static int sfb_drop_early(struct sfb *, struct pkthdr *, u_int16_t *,
    struct timespec *);
static boolean_t sfb_bin_addfcentry(struct sfb *, struct pkthdr *);
static void sfb_fclist_append(struct sfb *, struct sfb_fcl *);
static void sfb_fclists_clean(struct sfb *sp);
static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin);
static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *,
    struct timespec *);
SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "SFB");

static u_int64_t sfb_holdtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_holdtime, "SFB freeze time in nanoseconds");

static u_int64_t sfb_pboxtime = 0;	/* 0 indicates "automatic" */
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_pboxtime, "SFB penalty box time in nanoseconds");

static u_int64_t sfb_hinterval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_hinterval, "SFB hash interval in nanoseconds");

static u_int64_t sfb_target_qdelay = 0;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_target_qdelay, "SFB target queue delay in nanoseconds");

static u_int64_t sfb_update_interval;
SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, update_interval,
    CTLFLAG_RW|CTLFLAG_LOCKED, &sfb_update_interval, "SFB update interval");

static u_int32_t sfb_increment = SFB_INCREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_increment, SFB_INCREMENT, "SFB increment [d1]");

static u_int32_t sfb_decrement = SFB_DECREMENT;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]");

static u_int32_t sfb_allocation = 0;	/* 0 means "automatic" */
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_allocation, 0, "SFB bin allocation");

static u_int32_t sfb_ratelimit = 0;
SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW|CTLFLAG_LOCKED,
    &sfb_ratelimit, 0, "SFB rate limit");
#define	KBPS	(1ULL * 1000)		/* 1 Kbits per second */
#define	MBPS	(1ULL * 1000 * 1000)	/* 1 Mbits per second */
#define	GBPS	(MBPS * 1000)		/* 1 Gbits per second */

struct sfb_time_tbl {
	u_int64_t	speed;		/* uplink speed */
	u_int64_t	holdtime;	/* hold time */
	u_int64_t	pboxtime;	/* penalty box time */
};

static struct sfb_time_tbl sfb_ttbl[] = {
	{ 1 * MBPS,	HOLDTIME_BASE * 1000,	PBOXTIME_BASE * 1000	},
	{ 10 * MBPS,	HOLDTIME_BASE * 100,	PBOXTIME_BASE * 100	},
	{ 100 * MBPS,	HOLDTIME_BASE * 10,	PBOXTIME_BASE * 10	},
	{ 1 * GBPS,	HOLDTIME_BASE,		PBOXTIME_BASE		},
	{ 10 * GBPS,	HOLDTIME_BASE / 10,	PBOXTIME_BASE / 10	},
	{ 100 * GBPS,	HOLDTIME_BASE / 100,	PBOXTIME_BASE / 100	},
	{ 0, 0, 0 }
};
void
sfb_init(void)
{
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof (struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}
static u_int32_t
sfb_random(struct sfb *sp)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	return (RandomULong());
}
static void
sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t holdtime;

	if (sfb_holdtime != 0) {
		holdtime = sfb_holdtime;
	} else if (outbw == 0) {
		holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX);
	} else {
		u_int64_t n;
		unsigned int i;

		n = sfb_ttbl[0].holdtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed)
				break;
			n = sfb_ttbl[i].holdtime;
		}
		holdtime = n;
	}
	net_nsectimer(&holdtime, &sp->sfb_holdtime);
}
static void
sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw)
{
	u_int64_t pboxtime;

	if (sfb_pboxtime != 0) {
		pboxtime = sfb_pboxtime;
	} else if (outbw == 0) {
		pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX);
	} else {
		u_int64_t n;
		unsigned int i;

		n = sfb_ttbl[0].pboxtime;
		for (i = 0; sfb_ttbl[i].speed != 0; i++) {
			if (outbw < sfb_ttbl[i].speed)
				break;
			n = sfb_ttbl[i].pboxtime;
		}
		pboxtime = n;
	}
	net_nsectimer(&pboxtime, &sp->sfb_pboxtime);
	net_timerclear(&sp->sfb_pboxfreeze);
}
static void
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
{
	u_int64_t hinterval = 0;
	struct timespec now;

	if (t != NULL) {
		/*
		 * TODO adi@apple.com: use dq_avg to derive hinterval.
		 */
		hinterval = *t;
	}

	if (sfb_hinterval != 0)
		hinterval = sfb_hinterval;
	else if (t == NULL || hinterval == 0)
		hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);

	net_nsectimer(&hinterval, &sp->sfb_hinterval);

	nanouptime(&now);
	net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}
static void
sfb_calc_target_qdelay(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
	u_int64_t target_qdelay = 0;
	struct ifnet *ifp = sp->sfb_ifp;

	target_qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd);

	/* If the system-level override is set, use it */
	if (sfb_target_qdelay != 0)
		target_qdelay = sfb_target_qdelay;

	/*
	 * If we do not know the effective bandwidth, use the default
	 * target queue delay.
	 */
	if (target_qdelay == 0)
		target_qdelay = IFQ_TARGET_DELAY;

	/*
	 * If a delay has been added to ifnet start callback for
	 * coalescing, we have to add that to the pre-set target delay
	 * because the packets can be in the queue longer.
	 */
	if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) &&
	    ifp->if_start_delay_timeout > 0)
		target_qdelay += ifp->if_start_delay_timeout;

	sp->sfb_target_qdelay = target_qdelay;
}
static void
sfb_calc_update_interval(struct sfb *sp, u_int64_t out_bw)
{
#pragma unused(out_bw)
	u_int64_t update_interval = 0;

	/* If the system-level override is set, use it */
	if (sfb_update_interval != 0)
		update_interval = sfb_update_interval;

	/*
	 * If we do not know the effective bandwidth, use the default
	 * update interval.
	 */
	if (update_interval == 0)
		update_interval = IFQ_UPDATE_INTERVAL;

	net_nsectimer(&update_interval, &sp->sfb_update_interval);
}
/*
 * sfb support routines
 */
struct sfb *
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
	struct sfb *sp;
	int i;

	VERIFY(ifp != NULL && qlim > 0);

	sp = zalloc(sfb_zone);
	if (sp == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
		return (NULL);
	}
	bzero(sp, sfb_size);

	if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp));
		sfb_destroy(sp);
		return (NULL);
	}
	bzero(sp->sfb_bins, sfb_bins_size);

	if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
		    if_name(ifp));
		sfb_destroy(sp);
		return (NULL);
	}
	bzero(sp->sfb_fc_lists, sfb_fcl_size);

	for (i = 0; i < SFB_BINS; ++i)
		STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist);

	sp->sfb_ifp = ifp;
	sp->sfb_qlim = qlim;
	sp->sfb_qid = qid;
	sp->sfb_flags = (flags & SFBF_USERFLAGS);
#if !PF_ECN
	if (sp->sfb_flags & SFBF_ECN) {
		sp->sfb_flags &= ~SFBF_ECN;
		log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
		    "SFBF_ECN flag!\n", if_name(ifp), sp->sfb_qid);
	}
#endif /* !PF_ECN */

	sfb_resetq(sp, CLASSQ_EV_INIT);

	return (sp);
}
static void
sfb_fclist_append(struct sfb *sp, struct sfb_fcl *fcl)
{
	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);

	VERIFY(STAILQ_EMPTY(&fcl->fclist) || fcl->cnt > 0);
	sp->sfb_stats.flow_feedback += fcl->cnt;
	fcl->cnt = 0;

	flowadv_add(&fcl->fclist);
	VERIFY(fcl->cnt == 0 && STAILQ_EMPTY(&fcl->fclist));
}
static void
sfb_fclists_clean(struct sfb *sp)
{
	int i;

	/* Move all the flow control entries to the flowadv list */
	for (i = 0; i < SFB_BINS; ++i) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
		if (!STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);
	}
}
void
sfb_destroy(struct sfb *sp)
{
	sfb_fclists_clean(sp);
	if (sp->sfb_bins != NULL) {
		zfree(sfb_bins_zone, sp->sfb_bins);
		sp->sfb_bins = NULL;
	}
	if (sp->sfb_fc_lists != NULL) {
		zfree(sfb_fcl_zone, sp->sfb_fc_lists);
		sp->sfb_fc_lists = NULL;
	}
	zfree(sfb_zone, sp);
}
static void
sfb_resetq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;
	u_int64_t eff_rate;

	VERIFY(ifp != NULL);

	if (ev != CLASSQ_EV_LINK_DOWN) {
		(*sp->sfb_bins)[0].fudge = sfb_random(sp);
		(*sp->sfb_bins)[1].fudge = sfb_random(sp);
		sp->sfb_allocation = ((sfb_allocation == 0) ?
		    (sp->sfb_qlim / 3) : sfb_allocation);
		sp->sfb_drop_thresh = sp->sfb_allocation +
		    (sp->sfb_allocation >> 1);
	}

	sp->sfb_clearpkts = 0;
	sp->sfb_current = 0;

	eff_rate = ifnet_output_linkrate(ifp);
	sp->sfb_eff_rate = eff_rate;

	sfb_calc_holdtime(sp, eff_rate);
	sfb_calc_pboxtime(sp, eff_rate);
	sfb_calc_hinterval(sp, NULL);
	sfb_calc_target_qdelay(sp, eff_rate);
	sfb_calc_update_interval(sp, eff_rate);

	if (ev == CLASSQ_EV_LINK_DOWN ||
	    ev == CLASSQ_EV_LINK_UP)
		sfb_fclists_clean(sp);

	bzero(sp->sfb_bins, sizeof (*sp->sfb_bins));
	bzero(&sp->sfb_stats, sizeof (sp->sfb_stats));

	if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose)
		return;

	log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, "
	    "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, "
	    "hinterval=%d sec, sfb_bins=%d bytes, eff_rate=%llu bps, "
	    "target_qdelay=%llu nsec, "
	    "update_interval=%llu sec %llu nsec, flags=0x%x\n",
	    if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec,
	    (u_int64_t)sp->sfb_pboxtime.tv_nsec,
	    (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh,
	    (int)sp->sfb_hinterval.tv_sec, (int)sizeof (*sp->sfb_bins),
	    eff_rate, (u_int64_t)sp->sfb_target_qdelay,
	    (u_int64_t)sp->sfb_update_interval.tv_sec,
	    (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags);
}
void
sfb_getstats(struct sfb *sp, struct sfb_stats *sps)
{
	sps->allocation = sp->sfb_allocation;
	sps->dropthresh = sp->sfb_drop_thresh;
	sps->clearpkts = sp->sfb_clearpkts;
	sps->current = sp->sfb_current;
	sps->target_qdelay = sp->sfb_target_qdelay;
	sps->min_estdelay = sp->sfb_min_qdelay;
	sps->delay_fcthreshold = sp->sfb_fc_threshold;
	sps->flags = sp->sfb_flags;

	net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time);
	net_timernsec(&sp->sfb_pboxtime, &sp->sfb_stats.pbox_time);
	net_timernsec(&sp->sfb_hinterval, &sp->sfb_stats.rehash_intval);
	net_timernsec(&sp->sfb_update_interval, &sps->update_interval);
	*(&(sps->sfbstats)) = *(&(sp->sfb_stats));

	_CASSERT(sizeof ((*sp->sfb_bins)[0].stats) ==
	    sizeof (sps->binstats[0].stats));

	bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats,
	    sizeof (sps->binstats[0].stats));
	bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats,
	    sizeof (sps->binstats[1].stats));
}
static void
sfb_swap_bins(struct sfb *sp, u_int32_t len)
{
	int i, j, s;

	if (sp->sfb_flags & SFBF_SUSPENDED)
		return;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	(*sp->sfb_bins)[s].fudge = sfb_random(sp); /* recompute perturbation */
	sp->sfb_clearpkts = len;
	sp->sfb_stats.num_rehash++;

	s = (sp->sfb_current ^= 1);	/* flip the bit (swap current) */

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, "
		    "qlen=%d\n", if_name(sp->sfb_ifp), sp->sfb_qid, s, len);
	}

	/* clear freezetime for all current bins */
	bzero(&(*sp->sfb_bins)[s].freezetime,
	    sizeof ((*sp->sfb_bins)[s].freezetime));

	/* clear/adjust bin statistics and flow control lists */
	for (i = 0; i < SFB_BINS; i++) {
		struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);

		if (!STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);

		for (j = 0; j < SFB_LEVELS; j++) {
			struct sfbbinstats *cbin, *wbin;

			cbin = SFB_BINST(sp, j, i, s);		/* current */
			wbin = SFB_BINST(sp, j, i, s ^ 1);	/* warm-up */

			cbin->pkts = 0;
			cbin->bytes = 0;
			if (cbin->pmark > SFB_MAX_PMARK)
				cbin->pmark = SFB_MAX_PMARK;
			if (cbin->pmark < 0)
				cbin->pmark = 0;

			/*
			 * Keep pmark from before to identify
			 * non-responsives immediately.
			 */
			if (wbin->pmark > SFB_PMARK_WARM)
				wbin->pmark = SFB_PMARK_WARM;
		}
	}
}
static inline int
sfb_pcheck(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	int s;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * For current bins, returns 1 if all pmark >= SFB_PMARK_TH,
	 * 0 otherwise; optimize for SFB_LEVELS=2.
	 */
#if SFB_LEVELS == 2
	/*
	 * Level 0: bin index at [0] for set 0; [2] for set 1
	 * Level 1: bin index at [1] for set 0; [3] for set 1
	 */
	if (SFB_BINST(sp, 0, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]),
	    s)->pmark < SFB_PMARK_TH ||
	    SFB_BINST(sp, 1, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]),
	    s)->pmark < SFB_PMARK_TH)
		return (0);
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

		if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH)
			return (0);
	}
#endif /* SFB_LEVELS != 2 */
	return (1);
}
static int
sfb_penalize(struct sfb *sp, struct pkthdr *pkt, struct timespec *now)
{
	struct timespec delta = { 0, 0 };

	/* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */
	if (!sfb_ratelimit || !sfb_pcheck(sp, pkt))
		return (0);

	net_timersub(now, &sp->sfb_pboxfreeze, &delta);
	if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) {
#if SFB_LEVELS != 2
		int i;
#endif /* SFB_LEVELS != 2 */
		struct sfbbinstats *bin;
		int n, w;

		w = sp->sfb_current ^ 1;
		VERIFY((w + (w ^ 1)) == 1);

		/*
		 * Update warm-up bins; optimize for SFB_LEVELS=2
		 */
#if SFB_LEVELS == 2
		/* Level 0: bin index at [0] for set 0; [2] for set 1 */
		n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1)]);
		bin = SFB_BINST(sp, 0, n, w);
		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now);

		/* Level 1: bin index at [1] for set 0; [3] for set 1 */
		n = SFB_BINMASK(pkt->pkt_sfb_hash8[(w << 1) + 1]);
		bin = SFB_BINST(sp, 1, n, w);
		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now);
#else /* SFB_LEVELS != 2 */
		for (i = 0; i < SFB_LEVELS; i++) {
			if (w == 0)	/* set 0, bin index [0,1] */
				n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
			else		/* set 1, bin index [2,3] */
				n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

			bin = SFB_BINST(sp, i, n, w);
			if (bin->pkts >= sp->sfb_allocation) {
				sfb_increment_bin(sp, bin,
				    SFB_BINFT(sp, i, n, w), now);
			}
		}
#endif /* SFB_LEVELS != 2 */
		return (1);
	}

	/* non-conformant or else misclassified flow; queue it anyway */
	pkt->pkt_sfb_flags |= SFB_PKT_PBOX;
	*(&sp->sfb_pboxfreeze) = *now;

	return (0);
}
static void
sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now, boolean_t inc)
{
	struct timespec delta;

	net_timersub(now, ft, &delta);
	if (net_timercmp(&delta, &sp->sfb_holdtime, <)) {
		if (classq_verbose > 1) {
			log(LOG_DEBUG, "%s: SFB qid=%d, %s update frozen "
			    "(delta=%llu nsec)\n", if_name(sp->sfb_ifp),
			    sp->sfb_qid, inc ? "increment" : "decrement",
			    (u_int64_t)delta.tv_nsec);
		}
		return;
	}

	/* increment/decrement marking probability */
	*ft = *now;
	if (inc)
		SFB_PMARK_INC(bin);
	else
		SFB_PMARK_DEC(bin);
}
static void
sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	return (sfb_adjust_bin(sp, bin, ft, now, FALSE));
}

static void
sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft,
    struct timespec *now)
{
	return (sfb_adjust_bin(sp, bin, ft, now, TRUE));
}
static inline void
sfb_dq_update_bins(struct sfb *sp, struct pkthdr *pkt,
    struct timespec *now, u_int32_t qsize)
{
#if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0
	int i;
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	struct sfbbinstats *bin;
	int s, n;
	struct sfb_fcl *fcl = NULL;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2 and SFB_FC_LEVEL=0
	 */
#if SFB_LEVELS == 2 && SFB_FC_LEVEL == 0
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= (u_int32_t)pkt->len);
	bin->pkts--;
	bin->bytes -= pkt->len;

	if (bin->pkts == 0)
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

	/* Deliver flow control feedback to the sockets */
	if (SFB_QUEUE_DELAYBASED(sp)) {
		if (!(SFB_IS_DELAYHIGH(sp)) ||
		    bin->bytes <= sp->sfb_fc_threshold ||
		    bin->pkts == 0 || qsize == 0)
			fcl = SFB_FC_LIST(sp, n);
	} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
		fcl = SFB_FC_LIST(sp, n);
	}

	if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
		sfb_fclist_append(sp, fcl);
	fcl = NULL;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);

	VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt->len);
	bin->pkts--;
	bin->bytes -= pkt->len;

	if (bin->pkts == 0)
		sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);

		VERIFY(bin->pkts > 0 && bin->bytes >= pkt->len);
		bin->pkts--;
		bin->bytes -= pkt->len;

		if (bin->pkts == 0)
			sfb_decrement_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		if (i != SFB_FC_LEVEL)
			continue;
		if (SFB_QUEUE_DELAYBASED(sp)) {
			if (!(SFB_IS_DELAYHIGH(sp)) ||
			    bin->bytes <= sp->sfb_fc_threshold)
				fcl = SFB_FC_LIST(sp, n);
		} else if (bin->pkts <= (sp->sfb_allocation >> 2)) {
			fcl = SFB_FC_LIST(sp, n);
		}
		if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist))
			sfb_fclist_append(sp, fcl);
		fcl = NULL;
	}
#endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */
}
static inline void
sfb_eq_update_bins(struct sfb *sp, struct pkthdr *pkt)
{
#if SFB_LEVELS != 2
	int i, n;
#endif /* SFB_LEVELS != 2 */
	int s;
	struct sfbbinstats *bin;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	bin = SFB_BINST(sp, 0,
	    SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]), s);
	bin->pkts++;
	bin->bytes += pkt->len;

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	bin = SFB_BINST(sp, 1,
	    SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]), s);
	bin->pkts++;
	bin->bytes += pkt->len;
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);
		bin->pkts++;
		bin->bytes += pkt->len;
	}
#endif /* SFB_LEVELS != 2 */
}
static boolean_t
sfb_bin_addfcentry(struct sfb *sp, struct pkthdr *pkt)
{
	struct flowadv_fcentry *fce;
	u_int32_t flowsrc, flowid;
	struct sfb_fcl *fcl;
	int s;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	flowsrc = pkt->pkt_flowsrc;
	flowid = pkt->pkt_flowid;

	if (flowid == 0) {
		sp->sfb_stats.null_flowid++;
		return (FALSE);
	}

	/*
	 * Use value at index 0 for set 0 and
	 * value at index 2 for set 1
	 */
	fcl = SFB_FC_LIST(sp, SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]));
	STAILQ_FOREACH(fce, &fcl->fclist, fce_link) {
		if (fce->fce_flowsrc == flowsrc &&
		    fce->fce_flowid == flowid) {
			/* Already on flow control list; just return */
			return (TRUE);
		}
	}

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
	fce = flowadv_alloc_entry(M_WAITOK);
	if (fce != NULL) {
		fce->fce_flowsrc = flowsrc;
		fce->fce_flowid = flowid;
		STAILQ_INSERT_TAIL(&fcl->fclist, fce, fce_link);
		fcl->cnt++;
		sp->sfb_stats.flow_controlled++;
	}

	return (fce != NULL);
}
/*
 * check if this flow needs to be flow-controlled or if this
 * packet needs to be dropped.
 */
static int
sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin)
{
	int ret = 0;

	if (SFB_QUEUE_DELAYBASED(sp)) {
		/*
		 * Mark or drop if this bin has more
		 * bytes than the flowcontrol threshold.
		 */
		if (SFB_IS_DELAYHIGH(sp) &&
		    bin->bytes >= (sp->sfb_fc_threshold << 1))
			ret = 1;
	} else {
		if (bin->pkts >= sp->sfb_allocation &&
		    bin->pkts >= sp->sfb_drop_thresh)
			ret = 1;	/* drop or mark */
	}

	return (ret);
}
/*
 * early-drop probability is kept in pmark of each bin of the flow
 */
static int
sfb_drop_early(struct sfb *sp, struct pkthdr *pkt, u_int16_t *pmin,
    struct timespec *now)
{
#if SFB_LEVELS != 2
	int i;
#endif /* SFB_LEVELS != 2 */
	struct sfbbinstats *bin;
	int s, n, ret = 0;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	*pmin = (u_int16_t)-1;

	/*
	 * Update current bins; optimize for SFB_LEVELS=2
	 */
#if SFB_LEVELS == 2
	/* Level 0: bin index at [0] for set 0; [2] for set 1 */
	n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1)]);
	bin = SFB_BINST(sp, 0, n, s);
	if (*pmin > (u_int16_t)bin->pmark)
		*pmin = (u_int16_t)bin->pmark;

	/* Update SFB probability */
	if (bin->pkts >= sp->sfb_allocation)
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now);

	ret = sfb_bin_mark_or_drop(sp, bin);

	/* Level 1: bin index at [1] for set 0; [3] for set 1 */
	n = SFB_BINMASK(pkt->pkt_sfb_hash8[(s << 1) + 1]);
	bin = SFB_BINST(sp, 1, n, s);
	if (*pmin > (u_int16_t)bin->pmark)
		*pmin = (u_int16_t)bin->pmark;

	if (bin->pkts >= sp->sfb_allocation)
		sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now);
#else /* SFB_LEVELS != 2 */
	for (i = 0; i < SFB_LEVELS; i++) {
		if (s == 0)		/* set 0, bin index [0,1] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i]);
		else			/* set 1, bin index [2,3] */
			n = SFB_BINMASK(pkt->pkt_sfb_hash8[i + 2]);

		bin = SFB_BINST(sp, i, n, s);
		if (*pmin > (u_int16_t)bin->pmark)
			*pmin = (u_int16_t)bin->pmark;

		if (bin->pkts >= sp->sfb_allocation)
			sfb_increment_bin(sp, bin,
			    SFB_BINFT(sp, i, n, s), now);
		if (i == SFB_FC_LEVEL)
			ret = sfb_bin_mark_or_drop(sp, bin);
	}
#endif /* SFB_LEVELS != 2 */

	if (sp->sfb_flags & SFBF_SUSPENDED)
		ret = 1;	/* drop or mark */

	return (ret);
}
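
/*
 * Illustrative sketch, not part of the original code: given pmin, the
 * minimum Q14 marking probability across the flow's current bins as
 * computed above, the enqueue path effectively flips a biased coin by
 * drawing a random Q14 value and comparing it against pmin.
 */
static inline int
sfb_example_coin_flip(struct sfb *sp, u_int16_t pmin)
{
	/* nonzero with probability of roughly pmin / SFB_MAX_PMARK */
	return ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin);
}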
static void
sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q,
    struct timespec *now)
{
	struct timespec max_getqtime;

	if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) ||
	    qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES ||
	    !net_timerisset(&sp->sfb_getqtime))
		return;

	net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval,
	    &max_getqtime);
	if (net_timercmp(now, &max_getqtime, >)) {
		/*
		 * No packets have been dequeued in an update interval
		 * worth of time.  It means that the queue is stalled.
		 */
		SFB_SET_DELAY_HIGH(sp, q);
		sp->sfb_stats.dequeue_stall++;
	}
}
#define	DTYPE_NODROP	0	/* no drop */
#define	DTYPE_FORCED	1	/* a "forced" drop */
#define	DTYPE_EARLY	2	/* an "unforced" (early) drop */
int
sfb_addq(struct sfb *sp, class_queue_t *q, struct mbuf *m, struct pf_mtag *t)
{
#if !PF_ECN
#pragma unused(t)
#endif /* !PF_ECN */
	struct pkthdr *pkt = &m->m_pkthdr;
	struct timespec now;
	int droptype, s;
	u_int16_t pmin;
	int fc_adv = 0;
	int ret = CLASSQEQ_SUCCESS;
	u_int32_t maxqsize = 0;

	s = sp->sfb_current;
	VERIFY((s + (s ^ 1)) == 1);

	/* See comments in <rdar://problem/14040693> */
	VERIFY(!(pkt->pkt_flags & PKTF_PRIV_GUARDED));
	pkt->pkt_flags |= PKTF_PRIV_GUARDED;

	if (pkt->pkt_enqueue_ts > 0) {
		net_nsectimer(&pkt->pkt_enqueue_ts, &now);
	} else {
		nanouptime(&now);
		net_timernsec(&now, &pkt->pkt_enqueue_ts);
	}

	/* time to swap the bins? */
	if (net_timercmp(&now, &sp->sfb_nextreset, >=)) {
		net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
		sfb_swap_bins(sp, qlen(q));
		s = sp->sfb_current;
		VERIFY((s + (s ^ 1)) == 1);
	}
	if (!net_timerisset(&sp->sfb_update_time)) {
		net_timeradd(&now, &sp->sfb_update_interval,
		    &sp->sfb_update_time);
	}

	/*
	 * If getq time is not set because this is the first packet
	 * or after idle time, set it now so that we can detect a stall.
	 */
	if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime))
		*(&sp->sfb_getqtime) = *(&now);
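
	/*
	 * Compute the per-set 16-bit hashes up front; dequeue-time
	 * accounting then works no matter which set is current by the
	 * time the packet leaves the queue (see the Moving Hash notes
	 * at the top of this file).
	 */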
	pkt->pkt_sfb_flags = 0;
	pkt->pkt_sfb_hash16[s] =
	    (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
	    (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK);
	pkt->pkt_sfb_hash16[s ^ 1] =
	    (SFB_HASH(&pkt->pkt_flowid, sizeof (pkt->pkt_flowid),
	    (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK);

	/* check if the queue has been stalled */
	sfb_detect_dequeue_stall(sp, q, &now);

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (sfb_drop_early(sp, pkt, &pmin, &now)) {
		/* flow control, mark or drop by sfb */
		if ((sp->sfb_flags & SFBF_FLOWCTL) &&
		    (pkt->pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/* drop all during suspension or for non-TCP */
			if ((sp->sfb_flags & SFBF_SUSPENDED) ||
			    pkt->pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				sp->sfb_stats.drop_early++;
			}
		}
#if PF_ECN
		else if ((sp->sfb_flags & SFBF_ECN) &&
		    (pkt->pkt_proto == IPPROTO_TCP) && /* only for TCP */
		    ((sfb_random(sp) & SFB_MAX_PMARK) <= pmin) &&
		    mark_ecn(m, t, sp->sfb_flags) &&
		    !(sp->sfb_flags & SFBF_SUSPENDED)) {
			/* successfully marked; do not drop. */
			sp->sfb_stats.marked_packets++;
		}
#endif /* PF_ECN */
		else {
			/* unforced drop by sfb */
			droptype = DTYPE_EARLY;
			sp->sfb_stats.drop_early++;
		}
	}

	/* non-responsive flow penalty? */
	if (droptype == DTYPE_NODROP && sfb_penalize(sp, pkt, &now)) {
		droptype = DTYPE_FORCED;
		sp->sfb_stats.drop_pbox++;
	}

	if (SFB_QUEUE_DELAYBASED(sp))
		maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE;
	else
		maxqsize = qlimit(q);

	/*
	 * When the queue length hits the queue limit, make it a forced
	 * drop
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= maxqsize) {
		if (pkt->pkt_proto == IPPROTO_TCP &&
		    ((pkt->pkt_flags & PKTF_TCP_REXMT) ||
		    (sp->sfb_flags & SFBF_LAST_PKT_DROPPED))) {
			/*
			 * At some level, dropping packets will make the
			 * flows backoff and will keep memory requirements
			 * under control.  But we should not cause a tail
			 * drop because it can take a long time for a
			 * TCP flow to recover.  We should try to drop
			 * alternate packets instead.
			 */
			sp->sfb_flags &= ~SFBF_LAST_PKT_DROPPED;
		} else {
			droptype = DTYPE_FORCED;
			sp->sfb_stats.drop_queue++;
			sp->sfb_flags |= SFBF_LAST_PKT_DROPPED;
		}
	}

	if (fc_adv == 1 && droptype != DTYPE_FORCED &&
	    sfb_bin_addfcentry(sp, pkt)) {
		/* deliver flow control advisory error */
		if (droptype == DTYPE_NODROP) {
			ret = CLASSQEQ_SUCCESS_FC;
			VERIFY(!(sp->sfb_flags & SFBF_SUSPENDED));
		} else if (sp->sfb_flags & SFBF_SUSPENDED) {
			/* dropped due to suspension */
			ret = CLASSQEQ_DROPPED_SP;
		} else {
			/* dropped due to flow-control */
			ret = CLASSQEQ_DROPPED_FC;
		}
	}

	/* if successful enqueue this packet, else drop it */
	if (droptype == DTYPE_NODROP) {
		_addq(q, m);
	} else {
		IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);
		m_freem(m);
		return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROPPED);
	}

	if (!(pkt->pkt_sfb_flags & SFB_PKT_PBOX))
		sfb_eq_update_bins(sp, pkt);
	else
		sp->sfb_stats.pbox_packets++;

	/* successfully queued */
	return (ret);
}
static struct mbuf *
sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge)
{
	struct timespec now;
	struct mbuf *m;
	struct pkthdr *pkt;

	if (!purge && (sp->sfb_flags & SFBF_SUSPENDED))
		return (NULL);

	nanouptime(&now);

	/* flow of 0 means head of queue */
	if ((m = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) {
		if (!purge)
			net_timerclear(&sp->sfb_getqtime);
		return (NULL);
	}

	VERIFY(m->m_flags & M_PKTHDR);

	pkt = &m->m_pkthdr;
	VERIFY(pkt->pkt_flags & PKTF_PRIV_GUARDED);

	if (!purge) {
		/* calculate EWMA of dequeues */
		if (net_timerisset(&sp->sfb_getqtime)) {
			struct timespec delta;
			u_int64_t avg, new;

			net_timersub(&now, &sp->sfb_getqtime, &delta);
			net_timernsec(&delta, &new);
			avg = sp->sfb_stats.dequeue_avg;
			if (avg > 0) {
				int decay = DEQUEUE_DECAY;
				/*
				 * If the time since last dequeue is
				 * significantly greater than the current
				 * average, weigh the average more against
				 * the old value.
				 */
				if (DEQUEUE_SPIKE(new, avg))
					decay += 5;
				avg = (((avg << decay) - avg) + new) >> decay;
			} else {
				avg = new;
			}
			sp->sfb_stats.dequeue_avg = avg;
		}
		*(&sp->sfb_getqtime) = *(&now);
	}

	if (!purge && SFB_QUEUE_DELAYBASED(sp)) {
		u_int64_t dequeue_ns, queue_delay = 0;
		net_timernsec(&now, &dequeue_ns);
		if (dequeue_ns > pkt->pkt_enqueue_ts)
			queue_delay = dequeue_ns - pkt->pkt_enqueue_ts;

		if (sp->sfb_min_qdelay == 0 ||
		    (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay))
			sp->sfb_min_qdelay = queue_delay;
		if (net_timercmp(&now, &sp->sfb_update_time, >=)) {
			if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) {
				if (!SFB_IS_DELAYHIGH(sp))
					SFB_SET_DELAY_HIGH(sp, q);
			} else {
				sp->sfb_flags &= ~(SFBF_DELAYHIGH);
				sp->sfb_fc_threshold = 0;
			}

			net_timeradd(&now, &sp->sfb_update_interval,
			    &sp->sfb_update_time);
			sp->sfb_min_qdelay = 0;
		}
	}
	pkt->pkt_enqueue_ts = 0;

	/*
	 * Clearpkts are the ones which were in the queue when the hash
	 * function was perturbed.  Since the perturbation value (fudge),
	 * and thus bin information for these packets is not known, we do
	 * not change accounting information while dequeuing these packets.
	 * It is important not to set the hash interval too small due to
	 * this reason.  A rule of thumb is to set it to K*D, where D is
	 * the time taken to drain the queue.
	 */
	if (pkt->pkt_sfb_flags & SFB_PKT_PBOX) {
		pkt->pkt_sfb_flags &= ~SFB_PKT_PBOX;
		if (sp->sfb_clearpkts > 0)
			sp->sfb_clearpkts--;
	} else if (sp->sfb_clearpkts > 0) {
		sp->sfb_clearpkts--;
	} else {
		sfb_dq_update_bins(sp, pkt, &now, qsize(q));
	}

	/* See comments in <rdar://problem/14040693> */
	pkt->pkt_flags &= ~PKTF_PRIV_GUARDED;

	/*
	 * If the queue becomes empty before the update interval, reset
	 * the flow control threshold
	 */
	if (qsize(q) == 0) {
		sp->sfb_flags &= ~SFBF_DELAYHIGH;
		sp->sfb_min_qdelay = 0;
		sp->sfb_fc_threshold = 0;
		net_timerclear(&sp->sfb_update_time);
		net_timerclear(&sp->sfb_getqtime);
	}

	return (m);
}
struct mbuf *
sfb_getq(struct sfb *sp, class_queue_t *q)
{
	return (sfb_getq_flow(sp, q, 0, FALSE));
}
void
sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	u_int32_t cnt = 0, len = 0;
	struct mbuf *m;

	IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd);

	while ((m = sfb_getq_flow(sp, q, flow, TRUE)) != NULL) {
		cnt++;
		len += m_pktlen(m);
		m_freem(m);
	}

	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
void
sfb_updateq(struct sfb *sp, cqev_t ev)
{
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH: {
		u_int64_t eff_rate = ifnet_output_linkrate(ifp);

		/* update parameters only if rate has changed */
		if (eff_rate == sp->sfb_eff_rate)
			break;

		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new "
			    "eff_rate=%llu bps\n", if_name(ifp), sp->sfb_qid,
			    eff_rate);
		}
		sfb_calc_holdtime(sp, eff_rate);
		sfb_calc_pboxtime(sp, eff_rate);
		sfb_calc_target_qdelay(sp, eff_rate);
		sfb_calc_update_interval(sp, eff_rate);
		sp->sfb_eff_rate = eff_rate;
		break;
	}

	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		if (classq_verbose) {
			log(LOG_DEBUG, "%s: SFB qid=%d, resetting due to "
			    "link %s\n", if_name(ifp), sp->sfb_qid,
			    (ev == CLASSQ_EV_LINK_UP) ? "UP" : "DOWN");
		}
		sfb_resetq(sp, ev);
		break;

	case CLASSQ_EV_LINK_LATENCY:
	case CLASSQ_EV_LINK_MTU:
	default:
		break;
	}
}
int
sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on)
{
	struct ifnet *ifp = sp->sfb_ifp;

	VERIFY(ifp != NULL);

	if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) ||
	    (!on && !(sp->sfb_flags & SFBF_SUSPENDED)))
		return (0);

	if (!(sp->sfb_flags & SFBF_FLOWCTL)) {
		log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since "
		    "flow-control is not enabled", if_name(ifp), sp->sfb_qid,
		    (on ? "suspend" : "resume"));
		return (ENOTSUP);
	}

	if (classq_verbose) {
		log(LOG_DEBUG, "%s: SFB qid=%d, setting state to %s",
		    if_name(ifp), sp->sfb_qid, (on ? "SUSPENDED" : "RUNNING"));
	}

	if (on) {
		sp->sfb_flags |= SFBF_SUSPENDED;
	} else {
		sp->sfb_flags &= ~SFBF_SUSPENDED;
		sfb_swap_bins(sp, qlen(q));
	}

	return (0);
}