/*
 * Copyright (c) 2016-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

static struct zone *flowq_zone = NULL;

#define	FQ_ZONE_MAX	(32 * 1024)	/* across all interfaces */

#define	DTYPE_NODROP	0	/* no drop */
#define	DTYPE_FORCED	1	/* a "forced" drop */
#define	DTYPE_EARLY	2	/* an "unforced" (early) drop */

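/*
 * fq_codel_init: set up the zone allocator used for flow queues.  Safe to
 * call more than once; later calls return without doing anything.
 */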
void
fq_codel_init(void)
{
	if (flowq_zone != NULL)
		return;

	flowq_zone = zinit(sizeof (struct flowq),
	    FQ_ZONE_MAX * sizeof (struct flowq), 0, "flowq_zone");
	if (flowq_zone == NULL) {
		panic("%s: failed to allocate flowq_zone", __func__);
		/* NOTREACHED */
	}
	zone_change(flowq_zone, Z_EXPAND, TRUE);
	zone_change(flowq_zone, Z_CALLERACCT, TRUE);
}

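/*
 * fq_alloc: allocate a flow queue from flowq_zone and initialize it for the
 * given packet type.  Returns NULL if the zone allocation fails.
 */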
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = zalloc(flowq_zone);
	if (fq == NULL) {
		log(LOG_ERR, "%s: unable to allocate from flowq_zone\n",
		    __func__);
		return (NULL);
	}

	bzero(fq, sizeof (*fq));
	fq->fq_ptype = ptype;
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
	return (fq);
}

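/*
 * fq_destroy: release an idle flow queue back to flowq_zone.  The queue must
 * be empty and must not be linked on the new or old flow lists.
 */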
void
fq_destroy(fq_t *fq)
{
	VERIFY(fq_empty(fq));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	zfree(flowq_zone, fq);
}

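/*
 * fq_detect_dequeue_stall: mark a flow as delay-high if it is holding at
 * least FQ_MIN_FC_THRESHOLD_BYTES but has not been dequeued from for a full
 * update interval.
 */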
static void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime;
	if (FQ_IS_DELAYHIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES)
		return;
	maxgetqtime = flowq->fq_getqtime + fqs->fqs_update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue in an update interval's worth of
		 * time, which means the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
	}
}

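/*
 * fq_head_drop: drop the packet at the head of a flow queue and charge it
 * to the interface's drop statistics.
 */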
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	if (fq_getq_flow_internal(fqs, fq, &pkt) == NULL)
		return;

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	if (pkt.pktsched_ptype == QP_MBUF)
		*pkt_flags &= ~PKTF_PRIV_GUARDED;

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}

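/*
 * fq_addq: enqueue a packet on its flow queue.  Handles flow-control
 * advisories, dequeue-stall detection and drop-from-head when the scheduler
 * is at its drop limit.  Returns a CLASSQEQ_* result code.
 */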
int
fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_flags;
	uint32_t pkt_flowid, pkt_tx_start_seq;
	uint8_t pkt_proto, pkt_flowsrc;

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, &pkt_tx_start_seq);

	if (pkt->pktsched_ptype == QP_MBUF) {
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
	}

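	/*
	 * Use the enqueue timestamp already carried by the packet if there
	 * is one; otherwise stamp the packet with the current uptime.
	 */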
	if (*pkt_timestamp > 0) {
		now = *pkt_timestamp;
	} else {
		struct timespec now_ts;
		nanouptime(&now_ts);
		now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;
		*pkt_timestamp = now;
	}

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, TRUE, pkt->pktsched_ptype);
	if (fq == NULL) {
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure++;
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		return (CLASSQEQ_DROP);
	}
	VERIFY(fq->fq_ptype == pkt->pktsched_ptype);

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	if (FQ_IS_DELAYHIGH(fq)) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP, drop the packet
			 */
			if (pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early++;
			}
		} else {
			/*
			 * We need to drop a packet; instead of dropping
			 * this one, try to drop from the head of the queue.
			 */
			if (!fq_empty(fq)) {
				fq_head_drop(fqs, fq);
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early++;
		}
	}

39037602
A
215 /* Set the return code correctly */
216 if (fc_adv == 1 && droptype != DTYPE_FORCED) {
5ba3f43e
A
217 if (fq_if_add_fcentry(fqs, pkt, pkt_flowid, pkt_flowsrc,
218 fq_cl)) {
39037602
A
219 fq->fq_flags |= FQF_FLOWCTL_ON;
220 /* deliver flow control advisory error */
221 if (droptype == DTYPE_NODROP) {
222 ret = CLASSQEQ_SUCCESS_FC;
223 } else {
224 /* dropped due to flow control */
5ba3f43e 225 ret = CLASSQEQ_DROP_FC;
39037602
A
226 }
227 } else {
228 /*
229 * if we could not flow control the flow, it is
230 * better to drop
231 */
232 droptype = DTYPE_FORCED;
5ba3f43e 233 ret = CLASSQEQ_DROP_FC;
39037602
A
234 fq_cl->fcl_stat.fcl_flow_control_fail++;
235 }
236 }
237
238 /*
239 * If the queue length hits the queue limit, drop a packet from the
240 * front of the queue for a flow with maximum number of bytes. This
241 * will penalize heavy and unresponsive flows. It will also avoid a
242 * tail drop.
243 */
244 if (droptype == DTYPE_NODROP && fq_if_at_drop_limit(fqs)) {
5ba3f43e
A
245 if (fqs->fqs_large_flow == fq) {
246 /*
247 * Drop from the head of the current fq. Since a
248 * new packet will be added to the tail, it is ok
249 * to leave fq in place.
250 */
251 fq_head_drop(fqs, fq);
252 } else {
253 if (fqs->fqs_large_flow == NULL) {
254 droptype = DTYPE_FORCED;
255 fq_cl->fcl_stat.fcl_drop_overflow++;
256
257 /*
258 * if this fq was freshly created and there
259 * is nothing to enqueue, free it
260 */
261 if (fq_empty(fq) && !(fq->fq_flags &
262 (FQF_NEW_FLOW | FQF_OLD_FLOW))) {
263 fq_if_destroy_flow(fqs, fq_cl, fq);
264 fq = NULL;
265 }
266 } else {
267 fq_if_drop_packet(fqs);
268 }
269 }
39037602
A
270 }
271
272 if (droptype == DTYPE_NODROP) {
5ba3f43e
A
273 uint32_t pkt_len = pktsched_get_pkt_len(pkt);
274 fq_enqueue(fq, pkt->pktsched_pkt);
275 fq->fq_bytes += pkt_len;
276 fq_cl->fcl_stat.fcl_byte_cnt += pkt_len;
39037602
A
277 fq_cl->fcl_stat.fcl_pkt_cnt++;
278
279 /*
280 * check if this queue will qualify to be the next
281 * victim queue
282 */
283 fq_if_is_flow_heavy(fqs, fq);
284 } else {
285 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
5ba3f43e 286 return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP);
39037602
A
287 }
288
289 /*
290 * If the queue is not currently active, add it to the end of new
291 * flows list for that service class.
292 */
293 if ((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) == 0) {
294 VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
295 STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
296 fq->fq_flags |= FQF_NEW_FLOW;
297
298 fq_cl->fcl_stat.fcl_newflows_cnt++;
299
300 fq->fq_deficit = fq_cl->fcl_quantum;
301 }
302 return (ret);
303}
304
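/*
 * fq_getq_flow_internal: dequeue the packet at the head of a flow queue and
 * update the flow, class and interface queue counters.  Returns the raw
 * packet pointer, or NULL if the queue is empty.
 */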
void *
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	void *p;
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, p);
	if (p == NULL)
		return (NULL);

	pktsched_pkt_encap(pkt, fq->fq_ptype, p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq))
		fq->fq_getqtime = 0;

	return (p);
}

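/*
 * fq_getq_flow: dequeue from a flow queue and run the CoDel-style delay
 * tracking: measure the per-packet queueing delay, update the flow's
 * minimum delay for the current interval and adjust the delay-high and
 * flow-control state.
 */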
void *
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	void *p;
	fq_if_classq_t *fq_cl;
	u_int64_t now;
	int64_t qdelay = 0;
	struct timespec now_ts;
	uint32_t *pkt_flags, pkt_tx_start_seq;
	uint64_t *pkt_timestamp;

	p = fq_getq_flow_internal(fqs, fq, pkt);
	if (p == NULL)
		return (NULL);

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, &pkt_tx_start_seq);

	nanouptime(&now_ts);
	now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp)
		qdelay = now - *pkt_timestamp;
	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay))
		fq->fq_min_qdelay = qdelay;
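	/*
	 * Once per update interval, compare the smallest queueing delay seen
	 * during the interval against the target delay and set or clear the
	 * flow's delay-high state accordingly.
	 */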
	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > fqs->fqs_target_qdelay) {
			if (!FQ_IS_DELAYHIGH(fq))
				FQ_SET_DELAY_HIGH(fq);
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}

		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + fqs->fqs_update_interval;
		fq->fq_min_qdelay = 0;
	}
	if (!FQ_IS_DELAYHIGH(fq) || fq_empty(fq)) {
		FQ_CLEAR_DELAY_HIGH(fq);
		if (fq->fq_flags & FQF_FLOWCTL_ON) {
			fq_if_flow_feedback(fqs, fq, fq_cl);
		}
	}

	if (fq_empty(fq)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	if (pkt->pktsched_ptype == QP_MBUF)
		*pkt_flags &= ~PKTF_PRIV_GUARDED;

	return (p);
}