/*
 * Copyright (c) 2016-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

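/*
 * This file implements the per-flow queue (flowq) layer of the FQ-CoDel
 * packet scheduler: flowq allocation, enqueue with early/forced drop
 * decisions, and dequeue with per-flow queueing-delay measurement.  The
 * scheduler-level logic (flow hashing, victim-flow selection, flow control
 * entries) lives in the fq_if_* routines declared in
 * <net/pktsched/pktsched_fq_codel.h>.
 */
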
static uint32_t flowq_size;			/* size of flowq */
static struct mcache *flowq_cache = NULL;	/* mcache for flowq */

#define FQ_ZONE_MAX	(32 * 1024)	/* across all interfaces */

#define DTYPE_NODROP	0	/* no drop */
#define DTYPE_FORCED	1	/* a "forced" drop */
#define DTYPE_EARLY	2	/* an "unforced" (early) drop */

void
fq_codel_init(void)
{
	if (flowq_cache != NULL) {
		return;
	}

	flowq_size = sizeof(fq_t);
	flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t),
	    0, MCR_SLEEP);
	if (flowq_cache == NULL) {
		panic("%s: failed to allocate flowq_cache", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}

void
fq_codel_reap_caches(boolean_t purge)
{
	mcache_reap_now(flowq_cache, purge);
}

fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = mcache_alloc(flowq_cache, MCR_SLEEP);
	if (fq == NULL) {
		/* pass __func__ to match the "%s" in the format string */
		log(LOG_ERR, "%s: unable to allocate from flowq_cache\n",
		    __func__);
		return NULL;
	}

	bzero(fq, flowq_size);
	fq->fq_ptype = ptype;
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
	return fq;
}

void
fq_destroy(fq_t *fq)
{
	VERIFY(fq_empty(fq));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	mcache_free(flowq_cache, fq);
}

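/*
 * A flow is considered stalled when it has enough backlog to be eligible
 * for flow control (at least FQ_MIN_FC_THRESHOLD_BYTES queued) but nothing
 * has been dequeued from it for a full update interval.  A stalled flow is
 * marked delay-high so that the enqueue path can start flow-controlling
 * or dropping.
 */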
static void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime;
	if (FQ_IS_DELAYHIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}
	maxgetqtime = flowq->fq_getqtime + fqs->fqs_update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue for an update interval's worth
		 * of time, which means the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
	}
}

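/*
 * Drop one packet from the head of the given flow queue, charging the drop
 * against the interface queue and freeing the packet.  Dropping at the
 * head rather than the tail follows the usual CoDel rationale: the
 * congestion signal reaches the sender sooner.
 */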
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}

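/*
 * Enqueue a packet on its flow queue.  The sequence is: stamp (or reuse)
 * the packet's enqueue time, look up or create the flowq, check for a
 * dequeue stall, and then decide between accepting the packet, issuing a
 * flow control advisory, or dropping: either this packet ("early" or
 * "forced") or one from the head of the largest flow when the interface
 * queue is at its drop limit.  Returns a CLASSQEQ_* code for the caller.
 */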
int
fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, pkt_tx_start_seq;
	uint8_t pkt_proto, pkt_flowsrc;

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, &pkt_tx_start_seq);

	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

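	/*
	 * Reuse the enqueue timestamp if the packet already carries one
	 * (e.g. it was stamped by an earlier classq stage); otherwise
	 * stamp it with the current uptime in nanoseconds.
	 */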
	if (*pkt_timestamp > 0) {
		now = *pkt_timestamp;
	} else {
		struct timespec now_ts;
		nanouptime(&now_ts);
		now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;
		*pkt_timestamp = now;
	}

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, TRUE, pkt->pktsched_ptype);
	if (fq == NULL) {
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure++;
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_ptype == pkt->pktsched_ptype);

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

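	/*
	 * If the flow is delay-high, prefer a flow control advisory for
	 * flows that support it (TCP/QUIC with PKTF_FLOW_ADV set);
	 * otherwise make room by dropping from the head of this queue,
	 * falling back to an early drop of the incoming packet when the
	 * queue is empty.
	 */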
	if (FQ_IS_DELAYHIGH(fq)) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the packet
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early++;
			}
		} else {
			/*
			 * Need to drop a packet; instead of dropping this
			 * one, try to drop from the head of the queue.
			 */
			if (!fq_empty(fq)) {
				fq_head_drop(fqs, fq);
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early++;
		}
	}

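	/*
	 * Set the return code correctly: when a flow control advisory is
	 * warranted, register a flow control entry with the scheduler so
	 * that the sending socket can be advised to back off.  If the
	 * entry cannot be added, fall back to a forced drop.
	 */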
	if (fc_adv == 1 && droptype != DTYPE_FORCED) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowid, pkt_flowsrc,
		    fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * if we could not flow control the flow, it is
			 * better to drop
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
	}

	/*
	 * If the queue length hits the queue limit, drop a packet from the
	 * front of the queue for a flow with maximum number of bytes. This
	 * will penalize heavy and unresponsive flows. It will also avoid a
	 * tail drop.
	 */
	if (droptype == DTYPE_NODROP && fq_if_at_drop_limit(fqs)) {
		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			fq_head_drop(fqs, fq);
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow++;
				ret = CLASSQEQ_DROP;

				/*
				 * if this fq was freshly created and there
				 * is nothing to enqueue, free it
				 */
				if (fq_empty(fq) && !(fq->fq_flags &
				    (FQF_NEW_FLOW | FQF_OLD_FLOW))) {
					fq_if_destroy_flow(fqs, fq_cl, fq);
					fq = NULL;
				}
			} else {
				fq_if_drop_packet(fqs);
			}
		}
	}

	if (droptype == DTYPE_NODROP) {
		uint32_t pkt_len = pktsched_get_pkt_len(pkt);
		fq_enqueue(fq, pkt->pktsched_pkt);
		fq->fq_bytes += pkt_len;
		fq_cl->fcl_stat.fcl_byte_cnt += pkt_len;
		fq_cl->fcl_stat.fcl_pkt_cnt++;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

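/*
 * Dequeue one packet from the flow queue and update the flow, class, and
 * interface queue accounting, without any queueing-delay bookkeeping.
 * Used both by the regular dequeue path (fq_getq_flow) and by
 * fq_head_drop, which should not feed drops into the delay measurement.
 */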
void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq)) {
		fq->fq_getqtime = 0;
	}
}

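/*
 * Dequeue one packet from the flow queue and run the CoDel-style delay
 * control: compute this packet's sojourn time from its enqueue timestamp,
 * track the minimum delay seen over the current update interval, and at
 * the end of each interval mark the flow delay-high if that minimum
 * exceeds the target delay.  Leaving the delay-high state also releases
 * any flow control that was asserted on the flow.
 */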
void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	fq_if_classq_t *fq_cl;
	u_int64_t now;
	int64_t qdelay = 0;
	struct timespec now_ts;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_tx_start_seq;
	uint64_t *pkt_timestamp;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, &pkt_tx_start_seq);

	nanouptime(&now_ts);
	now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}
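	/*
	 * Once per update interval, compare the smallest queueing delay
	 * observed during the interval against the target.  A minimum that
	 * is still above target means the queue is persistently long
	 * rather than just absorbing a burst, so mark the flow delay-high.
	 */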
	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > fqs->fqs_target_qdelay) {
			if (!FQ_IS_DELAYHIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + fqs->fqs_update_interval;
		fq->fq_min_qdelay = 0;
	}
	if (!FQ_IS_DELAYHIGH(fq) || fq_empty(fq)) {
		FQ_CLEAR_DELAY_HIGH(fq);
		if (fq->fq_flags & FQF_FLOWCTL_ON) {
			fq_if_flow_feedback(fqs, fq, fq_cl);
		}
	}

	if (fq_empty(fq)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}