[apple/xnu.git] / bsd / net / classq / classq_fq_codel.c
/*
 * Copyright (c) 2016-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

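/*
 * Flow queue (fq_t) management for the FQ-CoDel packet scheduler
 * (pktsched_fq_codel.c).  Each flow queue buffers the packets of one
 * flow, tracks its byte count and minimum queueing delay, and is
 * flagged delay-high when that delay exceeds the target so that the
 * scheduler can flow-control or drop from the offending flow.
 */
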
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

static uint32_t flowq_size;			/* size of flowq */
static struct mcache *flowq_cache = NULL;	/* mcache for flowq */

#define FQ_ZONE_MAX	(32 * 1024)	/* across all interfaces */

#define DTYPE_NODROP	0	/* no drop */
#define DTYPE_FORCED	1	/* a "forced" drop */
#define DTYPE_EARLY	2	/* an "unforced" (early) drop */

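/*
 * One-time initialization: create the mcache from which all flow
 * queues are allocated.  Subsequent calls are no-ops.
 */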
void
fq_codel_init(void)
{
	if (flowq_cache != NULL) {
		return;
	}

	flowq_size = sizeof(fq_t);
	flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t),
	    0, MCR_SLEEP);
	if (flowq_cache == NULL) {
		panic("%s: failed to allocate flowq_cache", __func__);
		/* NOTREACHED */
	}
}

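/*
 * Reap (and optionally purge) the flowq mcache so that idle cached
 * elements can be reclaimed, e.g. under memory pressure.
 */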
void
fq_codel_reap_caches(boolean_t purge)
{
	mcache_reap_now(flowq_cache, purge);
}

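/*
 * Allocate a zeroed flow queue for the given packet type; returns NULL
 * if the mcache allocation fails.
 */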
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = mcache_alloc(flowq_cache, MCR_SLEEP);
	if (fq == NULL) {
		log(LOG_ERR, "%s: unable to allocate from flowq_cache\n",
		    __func__);
		return NULL;
	}

	bzero(fq, flowq_size);
	fq->fq_ptype = ptype;
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
	return fq;
}

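/*
 * Free an empty, inactive flow queue back to the mcache.  The flow must
 * no longer be on its class's new- or old-flows list.
 */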
void
fq_destroy(fq_t *fq)
{
	VERIFY(fq_empty(fq));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	mcache_free(flowq_cache, fq);
}

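/*
 * Mark a flow as delay-high if it has at least FQ_MIN_FC_THRESHOLD_BYTES
 * queued but has not been dequeued from for a full update interval,
 * i.e. the queue appears stalled.
 */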
static void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime;
	if (FQ_IS_DELAYHIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}
	maxgetqtime = flowq->fq_getqtime + fqs->fqs_update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There has been no dequeue for a full update interval,
		 * which means the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
	}
}

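/*
 * Drop a single packet from the head of the flow queue, charging it to
 * the interface's drop statistics before freeing it.
 */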
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	if (fq_getq_flow_internal(fqs, fq, &pkt) == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	if (pkt.pktsched_ptype == QP_MBUF) {
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}

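/*
 * Enqueue a packet on its flow queue.  The packet is timestamped for
 * queue-delay measurement, the flow is checked for dequeue stalls, and
 * if the flow is already delay-high it is either flow-controlled (TCP
 * with flow advisory) or subjected to an early drop.  When the overall
 * queue limit is reached, a packet is dropped from the head of the
 * largest flow instead of tail-dropping.  Returns a CLASSQEQ_* code.
 */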
int
fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	uint32_t *pkt_flags;
	uint32_t pkt_flowid, pkt_tx_start_seq;
	uint8_t pkt_proto, pkt_flowsrc;

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, &pkt_tx_start_seq);

	if (pkt->pktsched_ptype == QP_MBUF) {
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
	}

	if (*pkt_timestamp > 0) {
		now = *pkt_timestamp;
	} else {
		struct timespec now_ts;
		nanouptime(&now_ts);
		now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;
		*pkt_timestamp = now;
	}

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, TRUE, pkt->pktsched_ptype);
	if (fq == NULL) {
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure++;
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_ptype == pkt->pktsched_ptype);

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	if (FQ_IS_DELAYHIGH(fq)) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * Non-TCP packets cannot react to the flow
			 * advisory, so drop them early instead.
			 */
			if (pkt_proto != IPPROTO_TCP) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early++;
			}
		} else {
			/*
			 * We need to drop a packet; instead of dropping
			 * this one, try to drop from the head of the queue.
			 */
			if (!fq_empty(fq)) {
				fq_head_drop(fqs, fq);
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early++;
		}
	}

	/* Add a flow control entry and set the return code accordingly */
	if (fc_adv == 1 && droptype != DTYPE_FORCED) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowid, pkt_flowsrc,
		    fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * if we could not flow control the flow, it is
			 * better to drop
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
	}

	/*
	 * If the queue length hits the queue limit, drop a packet from the
	 * front of the queue for a flow with maximum number of bytes. This
	 * will penalize heavy and unresponsive flows. It will also avoid a
	 * tail drop.
	 */
	if (droptype == DTYPE_NODROP && fq_if_at_drop_limit(fqs)) {
		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			fq_head_drop(fqs, fq);
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow++;
				ret = CLASSQEQ_DROP;

				/*
				 * if this fq was freshly created and there
				 * is nothing to enqueue, free it
				 */
				if (fq_empty(fq) && !(fq->fq_flags &
				    (FQF_NEW_FLOW | FQF_OLD_FLOW))) {
					fq_if_destroy_flow(fqs, fq_cl, fq);
					fq = NULL;
				}
			} else {
				fq_if_drop_packet(fqs);
			}
		}
	}

	if (droptype == DTYPE_NODROP) {
		uint32_t pkt_len = pktsched_get_pkt_len(pkt);
		fq_enqueue(fq, pkt->pktsched_pkt);
		fq->fq_bytes += pkt_len;
		fq_cl->fcl_stat.fcl_byte_cnt += pkt_len;
		fq_cl->fcl_stat.fcl_pkt_cnt++;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

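/*
 * Dequeue one packet from a flow queue and adjust the flow, class and
 * interface byte/packet counters.  This does not update the flow's
 * delay-tracking state; fq_getq_flow() does.
 */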
void *
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	void *p;
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, p);
	if (p == NULL) {
		return NULL;
	}

	pktsched_pkt_encap(pkt, fq->fq_ptype, p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq)) {
		fq->fq_getqtime = 0;
	}

	return p;
}

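/*
 * Dequeue one packet from a flow queue and update the flow's delay
 * state: the per-packet queueing delay is measured from the enqueue
 * timestamp, the minimum delay over the current update interval is
 * tracked, and the delay-high flag and any flow-control feedback are
 * updated accordingly.
 */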
void *
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	void *p;
	fq_if_classq_t *fq_cl;
	u_int64_t now;
	int64_t qdelay = 0;
	struct timespec now_ts;
	uint32_t *pkt_flags, pkt_tx_start_seq;
	uint64_t *pkt_timestamp;

	p = fq_getq_flow_internal(fqs, fq, pkt);
	if (p == NULL) {
		return NULL;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, &pkt_tx_start_seq);

	nanouptime(&now_ts);
	now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec;

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}
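	/*
	 * Once per update interval, compare the minimum queueing delay
	 * seen during that interval against the target; flows that stay
	 * above the target are marked delay-high.
	 */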
	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > fqs->fqs_target_qdelay) {
			if (!FQ_IS_DELAYHIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}

		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + fqs->fqs_update_interval;
		fq->fq_min_qdelay = 0;
	}
	if (!FQ_IS_DELAYHIGH(fq) || fq_empty(fq)) {
		FQ_CLEAR_DELAY_HIGH(fq);
		if (fq->fq_flags & FQF_FLOWCTL_ON) {
			fq_if_flow_feedback(fqs, fq, fq_cl);
		}
	}

	if (fq_empty(fq)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	if (pkt->pktsched_ptype == QP_MBUF) {
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
	}

	return p;
}