]> git.saurik.com Git - apple/xnu.git/blame - bsd/net/classq/if_classq.h
xnu-6153.11.26.tar.gz
[apple/xnu.git] / bsd / net / classq / if_classq.h
CommitLineData
316670eb 1/*
cb323159 2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
316670eb
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#ifndef _NET_CLASSQ_IF_CLASSQ_H_
0a7de745 30#define _NET_CLASSQ_IF_CLASSQ_H_
316670eb
A
31
32#ifdef PRIVATE
0a7de745 33#define IFCQ_SC_MAX 10 /* max number of queues */
316670eb
A
34
35#ifdef BSD_KERNEL_PRIVATE
36#include <net/classq/classq.h>
5ba3f43e
A
37
38/* maximum number of packets stored across all queues */
0a7de745 39#define IFCQ_DEFAULT_PKT_DROP_LIMIT 2048
316670eb
A
40
/*
 * classq request types: opcodes passed to the scheduler's ifcq_request
 * handler (see ifclassq_req_func and IFCQ_* request macros below).
 */
typedef enum cqrq {
	CLASSQRQ_PURGE =        1,      /* purge all packets */
	CLASSQRQ_PURGE_SC =     2,      /* purge service class (and flow) */
	CLASSQRQ_EVENT =        3,      /* interface events */
	CLASSQRQ_THROTTLE =     4,      /* throttle packets */
	CLASSQRQ_STAT_SC =      5,      /* get service class queue stats */
} cqrq_t;
49
/*
 * classq purge_sc request argument; carried through ifcq_request for
 * CLASSQRQ_PURGE_SC.  Caller fills the (in) fields, the scheduler
 * reports how much was purged via the (out) fields.
 */
typedef struct cqrq_purge_sc {
	mbuf_svc_class_t        sc;     /* (in) service class */
	u_int32_t               flow;   /* (in) 0 means all flows */
	u_int32_t               packets; /* (out) purged packets */
	u_int32_t               bytes;  /* (out) purged bytes */
} cqrq_purge_sc_t;
57
/*
 * classq throttle request argument; carried through ifcq_request for
 * CLASSQRQ_THROTTLE.  `set' selects setter (non-zero) vs. getter (zero)
 * semantics — see IFCQ_SET_THROTTLE/IFCQ_GET_THROTTLE.
 */
typedef struct cqrq_throttle {
	u_int32_t               set;    /* set or get */
	u_int32_t               level;  /* (in/out) throttling level */
} cqrq_throttle_t;
63
39236c6e
A
/*
 * classq service class stats request argument; carried through
 * ifcq_request for CLASSQRQ_STAT_SC (see IFCQ_LEN_SC).
 */
typedef struct cqrq_stat_sc {
	mbuf_svc_class_t        sc;      /* (in) service class */
	u_int32_t               packets; /* (out) packets enqueued */
	u_int32_t               bytes;   /* (out) bytes enqueued */
} cqrq_stat_sc_t;
70
316670eb
A
/*
 * A token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.  Modern cards are able to buffer
 * a large amount of packets and dequeue too many packets at a time.  This
 * bursty dequeue behavior makes it impossible to schedule packets by
 * queueing disciplines.  A token-bucket is used to control the burst size
 * in a device independent manner.
 */
struct tb_regulator {
	u_int64_t       tbr_rate_raw;   /* (unscaled) token bucket rate */
	u_int32_t       tbr_percent;    /* token bucket rate in percentage */
	int64_t         tbr_rate;       /* (scaled) token bucket rate */
	int64_t         tbr_depth;      /* (scaled) token bucket depth */

	/* running state, updated as tokens are consumed/replenished */
	int64_t         tbr_token;      /* (scaled) current token */
	int64_t         tbr_filluptime; /* (scaled) time to fill up bucket */
	u_int64_t       tbr_last;       /* last time token was updated */

	/* needed for poll-and-dequeue */
};
91
/*
 * Simple token bucket meter profile: the user-visible parameters used to
 * configure a tb_regulator (see ifclassq_tbr_set).  Either an absolute
 * rate or a link percentage may be given.
 */
struct tb_profile {
	u_int64_t       rate;           /* rate in bit-per-sec */
	u_int32_t       percent;        /* rate in percentage */
	u_int32_t       depth;          /* depth in bytes */
};
98
struct ifclassq;
enum cqdq_op;
enum cqrq;

/*
 * Scheduler entry points installed via ifclassq_attach().  All operate on
 * the ifclassq and exchange packets through classq_pkt_t handles.
 */
/* enqueue one packet; boolean_t out-param requests flow-control feedback */
typedef int (*ifclassq_enq_func)(struct ifclassq *, classq_pkt_t *,
    boolean_t *);
/* dequeue the next eligible packet */
typedef void (*ifclassq_deq_func)(struct ifclassq *, classq_pkt_t *);
/* dequeue the next eligible packet of a given service class */
typedef void (*ifclassq_deq_sc_func)(struct ifclassq *, mbuf_svc_class_t,
    classq_pkt_t *);
/* dequeue up to a packet/byte limit; returns head/tail chain and counts */
typedef int (*ifclassq_deq_multi_func)(struct ifclassq *, u_int32_t,
    u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *);
/* as above, restricted to one service class */
typedef int (*ifclassq_deq_sc_multi_func)(struct ifclassq *,
    mbuf_svc_class_t, u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *,
    u_int32_t *, u_int32_t *);
/* control-plane requests (purge/event/throttle/stats); see cqrq_t */
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);
114
/*
 * Structure defining a queue for a network interface.  All fields are
 * protected by ifcq_lock unless otherwise noted.
 */
struct ifclassq {
	decl_lck_mtx_data(, ifcq_lock);

	struct ifnet    *ifcq_ifp;      /* back pointer to interface */
	u_int32_t       ifcq_len;       /* packet count */
	u_int32_t       ifcq_maxlen;    /* queue capacity, in packets */
	struct pktcntr  ifcq_xmitcnt;   /* transmitted packets/bytes */
	struct pktcntr  ifcq_dropcnt;   /* dropped packets/bytes */

	u_int32_t       ifcq_type;      /* scheduler type */
	u_int32_t       ifcq_flags;     /* flags (IFCQF_*) */
	u_int32_t       ifcq_sflags;    /* scheduler flags */
	u_int32_t       ifcq_target_qdelay; /* target queue delay */
	u_int32_t       ifcq_bytes;     /* bytes count */
	u_int32_t       ifcq_pkt_drop_limit; /* see IFCQ_DEFAULT_PKT_DROP_LIMIT */
	void            *ifcq_disc;     /* for scheduler-specific use */
	/*
	 * ifcq_disc_slots[] represents the leaf classes configured for the
	 * corresponding discpline/scheduler, ordered by their corresponding
	 * service class index.  Each slot holds the queue ID used to identify
	 * the class instance, as well as the class instance pointer itself.
	 * The latter is used during enqueue and dequeue in order to avoid the
	 * costs associated with looking up the class pointer based on the
	 * queue ID.  The queue ID is used when querying the statistics from
	 * user space.
	 *
	 * Avoiding the use of queue ID during enqueue and dequeue is made
	 * possible by virtue of knowing the particular mbuf service class
	 * associated with the packets.  The service class index of the
	 * packet is used as the index to ifcq_disc_slots[].
	 *
	 * ifcq_disc_slots[] therefore also acts as a lookup table which
	 * provides for the mapping between MBUF_SC values and the actual
	 * scheduler classes.
	 */
	struct ifclassq_disc_slot {
		u_int32_t       qid;    /* queue ID (userland stats lookup) */
		void            *cl;    /* scheduler class instance */
	} ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */

	/* scheduler entry points, installed by ifclassq_attach() */
	ifclassq_enq_func       ifcq_enqueue;
	ifclassq_deq_func       ifcq_dequeue;
	ifclassq_deq_sc_func    ifcq_dequeue_sc;
	ifclassq_deq_multi_func ifcq_dequeue_multi;
	ifclassq_deq_sc_multi_func ifcq_dequeue_sc_multi;
	ifclassq_req_func       ifcq_request;

	/* token bucket regulator */
	struct tb_regulator     ifcq_tbr;       /* TBR */
};
168
/* ifcq_flags */
#define IFCQF_READY      0x01           /* ifclassq supports discipline */
#define IFCQF_ENABLED    0x02           /* ifclassq is in use */
#define IFCQF_TBR        0x04           /* Token Bucket Regulator is in use */

/* flag test helpers; non-zero when the corresponding flag is set */
#define IFCQ_IS_READY(_ifcq)            ((_ifcq)->ifcq_flags & IFCQF_READY)
#define IFCQ_IS_ENABLED(_ifcq)          ((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define IFCQ_TBR_IS_ENABLED(_ifcq)      ((_ifcq)->ifcq_flags & IFCQF_TBR)
316670eb
A
177
/* classq enqueue return value */
/* packet has to be dropped */
#define CLASSQEQ_DROP           (-1)
/* packet successfully enqueued */
#define CLASSQEQ_SUCCESS        0
/* packet enqueued; give flow control feedback */
#define CLASSQEQ_SUCCESS_FC     1
/* packet needs to be dropped due to flowcontrol; give flow control feedback */
#define CLASSQEQ_DROP_FC        2
/* packet needs to be dropped due to suspension; give flow control feedback */
#define CLASSQEQ_DROP_SP        3
316670eb
A
189
/* interface event argument for CLASSQRQ_EVENT */
typedef enum cqev {
	CLASSQ_EV_INIT = 0,
	CLASSQ_EV_LINK_BANDWIDTH = 1,   /* link bandwidth has changed */
	CLASSQ_EV_LINK_LATENCY = 2,     /* link latency has changed */
	CLASSQ_EV_LINK_MTU = 3,         /* link MTU has changed */
	CLASSQ_EV_LINK_UP = 4,          /* link is now up */
	CLASSQ_EV_LINK_DOWN = 5,        /* link is now down */
} cqev_t;
199#endif /* BSD_KERNEL_PRIVATE */
200
316670eb 201#include <net/pktsched/pktsched_tcq.h>
316670eb 202#include <net/pktsched/pktsched_qfq.h>
39037602 203#include <net/pktsched/pktsched_fq_codel.h>
316670eb
A
204
205#ifdef __cplusplus
206extern "C" {
207#endif
/*
 * Per-queue statistics exported to user space (see ifclassq_getqstats).
 * The anonymous union carries the scheduler-specific class stats; which
 * member is valid is indicated by ifqs_scheduler.
 */
struct if_ifclassq_stats {
	u_int32_t       ifqs_len;       /* current packet count */
	u_int32_t       ifqs_maxlen;    /* queue capacity */
	struct pktcntr  ifqs_xmitcnt;   /* transmitted packets/bytes */
	struct pktcntr  ifqs_dropcnt;   /* dropped packets/bytes */
	u_int32_t       ifqs_scheduler; /* scheduler type selector */
	union {
		struct tcq_classstats   ifqs_tcq_stats;
		struct qfq_classstats   ifqs_qfq_stats;
		struct fq_codel_classstats      ifqs_fq_codel_stats;
	};
} __attribute__((aligned(8)));
220
221#ifdef __cplusplus
222}
223#endif
224
225#ifdef BSD_KERNEL_PRIVATE
/*
 * For ifclassq lock
 */
#define IFCQ_LOCK_ASSERT_HELD(_ifcq)                                    \
	LCK_MTX_ASSERT(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)

#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq)                                 \
	LCK_MTX_ASSERT(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)

#define IFCQ_LOCK(_ifcq)                                                \
	lck_mtx_lock(&(_ifcq)->ifcq_lock)

/* acquire in spin mode; pair with IFCQ_CONVERT_LOCK before blocking ops */
#define IFCQ_LOCK_SPIN(_ifcq)                                           \
	lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)

/* convert a spin-mode hold to a full mutex hold; lock must be owned */
#define IFCQ_CONVERT_LOCK(_ifcq) do {                                   \
	IFCQ_LOCK_ASSERT_HELD(_ifcq);                                   \
	lck_mtx_convert_spin(&(_ifcq)->ifcq_lock);                      \
} while (0)

#define IFCQ_UNLOCK(_ifcq)                                              \
	lck_mtx_unlock(&(_ifcq)->ifcq_lock)
248
/*
 * For ifclassq operations: thin wrappers that dispatch through the
 * scheduler entry points installed in the ifclassq.
 */
#define IFCQ_ENQUEUE(_ifq, _p, _err, _drop) do {                        \
	(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _p, _drop);              \
} while (0)

#define IFCQ_DEQUEUE(_ifq, _p) do {                                     \
	(*(_ifq)->ifcq_dequeue)(_ifq, _p);                              \
} while (0)

#define IFCQ_DEQUEUE_SC(_ifq, _sc, _p) do {                             \
	(*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, _p);                      \
} while (0)

/* dequeue paths that honor the token bucket regulator */
#define IFCQ_TBR_DEQUEUE(_ifcq, _p) do {                                \
	ifclassq_tbr_dequeue(_ifcq, _p);                                \
} while (0)

#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _p) do {                        \
	ifclassq_tbr_dequeue_sc(_ifcq, _sc, _p);                        \
} while (0)
271
0a7de745
A
/* drop every queued packet */
#define IFCQ_PURGE(_ifq) do {                                           \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL);     \
} while (0)

/*
 * Drop queued packets of one service class (and optionally one flow;
 * _flow == 0 means all flows).  Purged packet/byte counts are returned
 * through _packets/_bytes.
 */
#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do {          \
	cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 };                    \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \
	(_packets) = _req.packets;                                      \
	(_bytes) = _req.bytes;                                          \
} while (0)
282
0a7de745
A
/* notify the scheduler of an interface event (cqev_t) */
#define IFCQ_UPDATE(_ifq, _ev) do {                                     \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT,            \
	    (void *)(_ev));                                             \
} while (0)

/* set the throttling level; _err receives the request's return code */
#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do {                      \
	cqrq_throttle_t _req = { 1, _level };                           \
	(_err) = (*(_ifq)->ifcq_request)                                \
	    (_ifq, CLASSQRQ_THROTTLE, &_req);                           \
} while (0)

/* read back the current throttling level into _level */
#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do {                      \
	cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF };               \
	(_err) = (*(_ifq)->ifcq_request)                                \
	    (_ifq, CLASSQRQ_THROTTLE, &_req);                           \
	(_level) = _req.level;                                          \
} while (0)
300
0a7de745
A
/*
 * Query the enqueued packet/byte counts for one service class.
 * _packets and _bytes are output POINTERS and may each be NULL when the
 * caller does not need that value; _err receives the request's return code.
 *
 * Fix: the request was previously issued through `(*(ifq)->ifcq_request)'
 * — note the missing leading underscore — which captured whatever
 * variable named `ifq' happened to exist in the caller's scope instead of
 * the macro argument `_ifq'.  It only compiled (and only worked) when the
 * caller's argument was literally a variable named `ifq'.  Use the macro
 * parameter consistently.
 */
#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do {             \
	cqrq_stat_sc_t _req = { _sc, 0, 0 };                            \
	(_err) = (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \
	if ((_packets) != NULL)                                         \
	        (*(_packets)) = _req.packets;                           \
	if ((_bytes) != NULL)                                           \
	        (*(_bytes)) = _req.bytes;                               \
} while (0)
309
0a7de745
A
/* packet-count accessors; callers must hold ifcq_lock */
#define IFCQ_LEN(_ifcq)         ((_ifcq)->ifcq_len)
#define IFCQ_QFULL(_ifcq)       (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define IFCQ_IS_EMPTY(_ifcq)    (IFCQ_LEN(_ifcq) == 0)
#define IFCQ_INC_LEN(_ifcq)     (IFCQ_LEN(_ifcq)++)
#define IFCQ_DEC_LEN(_ifcq)     (IFCQ_LEN(_ifcq)--)
#define IFCQ_MAXLEN(_ifcq)      ((_ifcq)->ifcq_maxlen)
#define IFCQ_SET_MAXLEN(_ifcq, _len)    ((_ifcq)->ifcq_maxlen = (_len))
#define IFCQ_TARGET_QDELAY(_ifcq)       ((_ifcq)->ifcq_target_qdelay)
/* byte-count accessors */
#define IFCQ_BYTES(_ifcq)       ((_ifcq)->ifcq_bytes)
#define IFCQ_INC_BYTES(_ifcq, _len)                                     \
	((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes + (_len))
#define IFCQ_DEC_BYTES(_ifcq, _len)                                     \
	((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes - (_len))
316670eb 323
0a7de745
A
/* account _pkt packets / _len bytes as transmitted */
#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do {                           \
	PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len);                \
} while (0)

/* account _pkt packets / _len bytes as dropped */
#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do {                           \
	PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len);                \
} while (0)

#define IFCQ_PKT_DROP_LIMIT(_ifcq)      ((_ifcq)->ifcq_pkt_drop_limit)
5ba3f43e 333
316670eb
A
/* lifecycle: attach/detach a classq to an interface */
extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
/* capacity and length queries */
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t *, u_int32_t *);
/* datapath: enqueue/dequeue (optionally per service class) */
extern errno_t ifclassq_enqueue(struct ifclassq *, classq_pkt_t *, boolean_t *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, u_int32_t,
    classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
    u_int32_t *);
/* peek without removing */
extern void *ifclassq_poll(struct ifclassq *, classq_pkt_type_t *);
extern void *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t,
    classq_pkt_type_t *);
/* scheduler management and events */
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
    ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
    ifclassq_deq_multi_func, ifclassq_deq_sc_multi_func, ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
    void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
/* token bucket regulator configuration and TBR-gated dequeue */
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *, boolean_t);
extern void ifclassq_tbr_dequeue(struct ifclassq *, classq_pkt_t *);
extern void ifclassq_tbr_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    classq_pkt_t *);
/* AQM tuning helpers and misc */
extern void ifclassq_calc_target_qdelay(struct ifnet *ifp,
    u_int64_t *if_target_qdelay);
extern void ifclassq_calc_update_interval(u_int64_t *update_interval);
extern void ifclassq_set_packet_metadata(struct ifclassq *ifq,
    struct ifnet *ifp, classq_pkt_t *p);
extern void ifclassq_reap_caches(boolean_t);
39037602 368
316670eb
A
369#endif /* BSD_KERNEL_PRIVATE */
370#endif /* PRIVATE */
371#endif /* _NET_CLASSQ_IF_CLASSQ_H_ */