/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/sdt.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

#include <netinet/tcp_var.h>

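/*
 * Flow queue (flowq) support for the FQ-CoDel packet scheduler.  This file
 * implements the per-flow building blocks: allocation and teardown of fq_t
 * structures, the enqueue path (fq_addq) with flow-control advisories,
 * ACK compression and queue-limit drops, and the dequeue path (fq_getq_flow)
 * that tracks per-flow queueing delay against the target delay.
 */
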
static uint32_t flowq_size;                     /* size of flowq */
static struct mcache *flowq_cache = NULL;       /* mcache for flowq */

#define FQ_ZONE_MAX     (32 * 1024)     /* across all interfaces */

#define DTYPE_NODROP    0       /* no drop */
#define DTYPE_FORCED    1       /* a "forced" drop */
#define DTYPE_EARLY     2       /* an "unforced" (early) drop */

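/*
 * One-time setup: create the mcache from which all flowq (fq_t) structures
 * are allocated.  Subsequent calls are no-ops once the cache exists.
 */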
void
fq_codel_init(void)
{
	if (flowq_cache != NULL) {
		return;
	}

	flowq_size = sizeof(fq_t);
	flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t),
	    0, MCR_SLEEP);
	if (flowq_cache == NULL) {
		panic("%s: failed to allocate flowq_cache", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}

void
fq_codel_reap_caches(boolean_t purge)
{
	mcache_reap_now(flowq_cache, purge);
}

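/*
 * Allocate and zero-initialize a flowq for the given packet type.  For
 * mbuf-based queues the embedded MBUFQ is initialized here; returns NULL
 * if the mcache allocation fails.
 */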
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = mcache_alloc(flowq_cache, MCR_SLEEP);
	if (fq == NULL) {
		log(LOG_ERR, "%s: unable to allocate from flowq_cache\n", __func__);
		return NULL;
	}

	bzero(fq, flowq_size);
	fq->fq_ptype = ptype;
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;
	return fq;
}

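/*
 * Free a flowq back to the cache.  The caller must have already marked it
 * FQF_DESTROYED and drained it: the queue must be empty, off the new/old
 * flow lists, and accounting zero bytes.
 */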
void
fq_destroy(fq_t *fq)
{
	VERIFY(fq->fq_flags & FQF_DESTROYED);
	VERIFY(fq_empty(fq));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	mcache_free(flowq_cache, fq);
}

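/*
 * Detect a dequeue stall: if a flow that holds at least
 * FQ_MIN_FC_THRESHOLD_BYTES has not been serviced for a full update
 * interval, mark it delay-high so the enqueue path can apply flow
 * control or head drops.
 */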
static inline void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime;
	if (FQ_IS_DELAYHIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}
	maxgetqtime = flowq->fq_getqtime + fqs->fqs_update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue for an entire update interval,
		 * which means that the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
		os_log_error(OS_LOG_DEFAULT, "%s: dequeue stall num: %d, "
		    "scidx: %d, flow: 0x%x, iface: %s", __func__,
		    fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
		    flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp));
	}
}

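/*
 * Drop a single packet from the head of the given flowq, updating the
 * interface drop statistics and freeing the packet.
 */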
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}


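/*
 * ACK compression: if the incoming packet carries a compression generation
 * count matching the packet currently at the tail of the flowq, the queued
 * packet is removed and freed, and its timestamp is inherited by the new
 * one, effectively keeping only the latest ACK of a train queued.  Returns
 * CLASSQEQ_COMPRESSED when a packet was replaced, 0 otherwise.
 */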
static int
fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
    pktsched_pkt_t *pkt)
{
	classq_pkt_type_t ptype = fq->fq_ptype;
	uint32_t comp_gencnt = 0;
	uint64_t *pkt_timestamp;
	uint64_t old_timestamp = 0;
	uint32_t old_pktlen = 0;
	struct ifclassq *ifq = fqs->fqs_ifq;

	if (__improbable(!tcp_do_ack_compression)) {
		return 0;
	}

	pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
	    &comp_gencnt);

	if (comp_gencnt == 0) {
		return 0;
	}

	fq_cl->fcl_stat.fcl_pkts_compressible++;

	if (fq_empty(fq)) {
		return 0;
	}

	if (ptype == QP_MBUF) {
		struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);

		if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
			return 0;
		}

		/* If we got this far, we should merge/replace the segment */
		MBUFQ_REMOVE(&fq->fq_mbufq, m);
		old_pktlen = m_pktlen(m);
		old_timestamp = m->m_pkthdr.pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		m_freem(m);
	}

	fq->fq_bytes -= old_pktlen;
	fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, old_pktlen);

	*pkt_timestamp = old_timestamp;

	return CLASSQEQ_COMPRESSED;
}

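/*
 * Enqueue a packet (or a chain of cnt packets) onto its flowq.  The packet
 * is hashed to a flowq, the flow is checked for dequeue stalls, and if it
 * is delay-high the code either raises a flow-control advisory (for
 * TCP/QUIC flows with flow-advisory support) or drops from the head of the
 * queue.  At the scheduler's drop limit, packets are dropped from the
 * largest flow instead of the tail.  Single packets may be merged by the
 * ACK compressor before the final enqueue, and newly active flows are
 * appended to the new-flows list with a fresh deficit.
 */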
int
fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, cnt;
	uint8_t pkt_proto, pkt_flowsrc;

	cnt = pkt->pktsched_pcnt;
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);

	/*
	 * XXX Not walking the chain to set this flag on every packet.
	 * This flag is only used for debugging. Nothing is affected if it's
	 * not set.
	 */
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/*
	 * Timestamps for every packet must be set prior to entering this path.
	 */
	now = *pkt_timestamp;
	ASSERT(now > 0);

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, TRUE, pkt->pktsched_ptype);
	if (__improbable(fq == NULL)) {
		DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_ptype == pkt->pktsched_ptype);

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	if (__improbable(FQ_IS_DELAYHIGH(fq))) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the chain.
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early += cnt;
			}
			DTRACE_IP6(flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		} else {
			/*
			 * Need to drop packets to make room for the new
			 * ones. Try to drop from the head of the queue
			 * instead of the latest packets.
			 */
			if (!fq_empty(fq)) {
				uint32_t i;

				for (i = 0; i < cnt; i++) {
					fq_head_drop(fqs, fq);
				}
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early += cnt;

			DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		}
	}

	/* Set the return code correctly */
	if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * if we could not flow control the flow, it is
			 * better to drop
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
		DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
	}

	/*
	 * If the queue length hits the queue limit, drop a chain with the
	 * same number of packets from the front of the queue of the flow
	 * with the maximum number of bytes. This will penalize heavy and
	 * unresponsive flows. It will also avoid a tail drop.
	 */
	if (__improbable(droptype == DTYPE_NODROP &&
	    fq_if_at_drop_limit(fqs))) {
		uint32_t i;

		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			DTRACE_IP5(large__flow, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    pktsched_pkt_t *, pkt, uint32_t, cnt);

			for (i = 0; i < cnt; i++) {
				fq_head_drop(fqs, fq);
			}
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow += cnt;
				ret = CLASSQEQ_DROP;

				DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
				    fq_if_classq_t *, fq_cl, fq_t *, fq,
				    pktsched_pkt_t *, pkt, uint32_t, cnt);

				/*
				 * if this fq was freshly created and there
				 * is nothing to enqueue, free it
				 */
				if (fq_empty(fq) && !(fq->fq_flags &
				    (FQF_NEW_FLOW | FQF_OLD_FLOW))) {
					fq_if_destroy_flow(fqs, fq_cl, fq, true);
					fq = NULL;
				}
			} else {
				DTRACE_IP5(different__large__flow,
				    fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
				    fq_t *, fq, pktsched_pkt_t *, pkt,
				    uint32_t, cnt);

				for (i = 0; i < cnt; i++) {
					fq_if_drop_packet(fqs);
				}
			}
		}
	}

	if (__probable(droptype == DTYPE_NODROP)) {
		uint32_t chain_len = pktsched_get_pkt_len(pkt);

		/*
		 * We do not compress if we are enqueuing a chain.
		 * Traversing the chain to look for acks would defeat the
		 * purpose of batch enqueueing.
		 */
		if (cnt == 1) {
			ret = fq_compressor(fqs, fq, fq_cl, pkt);
			if (ret != CLASSQEQ_COMPRESSED) {
				ret = CLASSQEQ_SUCCESS;
			} else {
				fq_cl->fcl_stat.fcl_pkts_compressed++;
			}
		}
		DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
		    fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
		fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt);

		fq->fq_bytes += chain_len;
		fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
		fq_cl->fcl_stat.fcl_pkt_cnt += cnt;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

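/*
 * Dequeue one packet from the flowq and update the flow, class and
 * interface counters.  This variant does not run the delay tracking;
 * callers that need it use fq_getq_flow().
 */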
void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq)) {
		fq->fq_getqtime = 0;
	}
}

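/*
 * Dequeue one packet and update the flow's delay state: track the minimum
 * queueing delay over the update interval, set or clear the delay-high
 * flag against the target delay, and issue flow-control feedback when the
 * flow recovers or empties.
 */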
void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	fq_if_classq_t *fq_cl;
	u_int64_t now;
	int64_t qdelay = 0;
	struct timespec now_ts;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	nanouptime(&now_ts);
	now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}
	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > fqs->fqs_target_qdelay) {
			if (!FQ_IS_DELAYHIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
				os_log_error(OS_LOG_DEFAULT,
				    "%s: high delay idx: %d, %llu, flow: 0x%x, "
				    "iface: %s", __func__, fq->fq_sc_index,
				    fq->fq_min_qdelay, fq->fq_flowhash,
				    if_name(fqs->fqs_ifq->ifcq_ifp));
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + fqs->fqs_update_interval;
		fq->fq_min_qdelay = 0;
	}
	if (!FQ_IS_DELAYHIGH(fq) || fq_empty(fq)) {
		FQ_CLEAR_DELAY_HIGH(fq);
		if (fq->fq_flags & FQF_FLOWCTL_ON) {
			fq_if_flow_feedback(fqs, fq, fq_cl);
		}
	}

	if (fq_empty(fq)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}