/*
 * Copyright (c) 2011-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _NET_CLASSQ_IF_CLASSQ_H_
#define _NET_CLASSQ_IF_CLASSQ_H_

#ifdef PRIVATE
#define	IFCQ_SC_MAX	10		/* max number of queues */

#ifdef BSD_KERNEL_PRIVATE
#include <net/classq/classq.h>

/* classq dequeue op arg */
typedef enum cqdq_op {
	CLASSQDQ_REMOVE = 1,		/* dequeue mbuf from the queue */
	CLASSQDQ_POLL = 2,		/* don't dequeue mbuf from the queue */
} cqdq_op_t;

/* classq request types */
typedef enum cqrq {
	CLASSQRQ_PURGE = 1,		/* purge all packets */
	CLASSQRQ_PURGE_SC = 2,		/* purge service class (and flow) */
	CLASSQRQ_EVENT = 3,		/* interface events */
	CLASSQRQ_THROTTLE = 4,		/* throttle packets */
} cqrq_t;

/* classq purge_sc request argument */
typedef struct cqrq_purge_sc {
	mbuf_svc_class_t	sc;	/* (in) service class */
	u_int32_t		flow;	/* (in) 0 means all flows */
	u_int32_t		packets; /* (out) purged packets */
	u_int32_t		bytes;	/* (out) purged bytes */
} cqrq_purge_sc_t;

/* classq throttle request argument */
typedef struct cqrq_throttle {
	u_int32_t		set;	/* set or get */
	u_int32_t		level;	/* (in/out) throttling level */
} cqrq_throttle_t;

#if PF_ALTQ
#include <net/altq/if_altq.h>
#endif /* PF_ALTQ */

/*
 * A token-bucket regulator limits the rate at which a network driver can
 * dequeue packets from the output queue.  Modern cards are able to buffer
 * a large number of packets and may dequeue too many packets at a time.
 * This bursty dequeue behavior makes it impossible for queueing disciplines
 * to schedule packets.  A token bucket is used to control the burst size
 * in a device-independent manner.
 */
struct tb_regulator {
	u_int64_t	tbr_rate_raw;	/* (unscaled) token bucket rate */
	u_int32_t	tbr_percent;	/* token bucket rate in percentage */
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	u_int64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type */
					/*   needed for poll-and-dequeue */
};

/* simple token bucket meter profile */
struct tb_profile {
	u_int64_t	rate;		/* rate in bits per second */
	u_int32_t	percent;	/* rate in percentage */
	u_int32_t	depth;		/* depth in bytes */
};
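
/*
 * Illustrative sketch (not part of this header): the arithmetic a
 * token-bucket regulator performs on each dequeue attempt.  `now' is
 * assumed to be the current time converted to the same scaled units as
 * tbr_filluptime; the actual implementation lives in the classq TBR code.
 *
 *	interval = now - tbr->tbr_last;
 *	if (interval >= tbr->tbr_filluptime) {
 *		tbr->tbr_token = tbr->tbr_depth;	// bucket refilled
 *	} else {
 *		tbr->tbr_token += interval * tbr->tbr_rate;
 *		if (tbr->tbr_token > tbr->tbr_depth)
 *			tbr->tbr_token = tbr->tbr_depth; // cap at depth
 *	}
 *	tbr->tbr_last = now;
 *
 * A packet may leave the queue only while tbr_token is positive; the
 * (scaled) packet size is then subtracted from tbr_token.
 */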

struct ifclassq;
enum cqdq_op;
enum cqrq;

typedef int (*ifclassq_enq_func)(struct ifclassq *, struct mbuf *);
typedef struct mbuf *(*ifclassq_deq_func)(struct ifclassq *, enum cqdq_op);
typedef struct mbuf *(*ifclassq_deq_sc_func)(struct ifclassq *,
    mbuf_svc_class_t, enum cqdq_op);
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);

/*
 * Structure defining a queue for a network interface.
 */
struct ifclassq {
	decl_lck_mtx_data(, ifcq_lock);

	struct ifnet	*ifcq_ifp;	/* back pointer to interface */
	u_int32_t	ifcq_len;
	u_int32_t	ifcq_maxlen;
	struct pktcntr	ifcq_xmitcnt;
	struct pktcntr	ifcq_dropcnt;

	u_int32_t	ifcq_type;	/* scheduler type */
	u_int32_t	ifcq_flags;	/* flags */
	u_int32_t	ifcq_sflags;	/* scheduler flags */
	void		*ifcq_disc;	/* for scheduler-specific use */
	/*
	 * ifcq_disc_slots[] represents the leaf classes configured for the
	 * corresponding discipline/scheduler, ordered by their corresponding
	 * service class index.  Each slot holds the queue ID used to identify
	 * the class instance, as well as the class instance pointer itself.
	 * The latter is used during enqueue and dequeue in order to avoid the
	 * costs associated with looking up the class pointer based on the
	 * queue ID.  The queue ID is used when querying the statistics from
	 * user space.
	 *
	 * Avoiding the use of queue ID during enqueue and dequeue is made
	 * possible by virtue of knowing the particular mbuf service class
	 * associated with the packets.  The service class index of the
	 * packet is used as the index into ifcq_disc_slots[].
	 *
	 * ifcq_disc_slots[] therefore also acts as a lookup table which
	 * provides the mapping between MBUF_SC values and the actual
	 * scheduler classes.
	 */
	struct ifclassq_disc_slot {
		u_int32_t	qid;
		void		*cl;
	} ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */

	ifclassq_enq_func	ifcq_enqueue;
	ifclassq_deq_func	ifcq_dequeue;
	ifclassq_deq_sc_func	ifcq_dequeue_sc;
	ifclassq_req_func	ifcq_request;

	/* token bucket regulator */
	struct tb_regulator	ifcq_tbr;	/* TBR */

#if PF_ALTQ
	u_int32_t	ifcq_drain;
	struct ifaltq	ifcq_altq;
#endif /* PF_ALTQ */
};
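
/*
 * Illustrative sketch (not part of this header): how an enqueue or
 * dequeue path might resolve a packet's scheduler class through
 * ifcq_disc_slots[].  `ifq' and `m' are hypothetical; MBUF_SCIDX() and
 * mbuf_get_service_class() are assumed from the kernel mbuf headers.
 *
 *	u_int32_t idx = MBUF_SCIDX(mbuf_get_service_class(m));
 *	void *cl = ifq->ifcq_disc_slots[idx].cl;	// fast-path pointer
 *	u_int32_t qid = ifq->ifcq_disc_slots[idx].qid;	// for stats queries
 *
 * The class pointer is used on the fast path; the queue ID matters only
 * when statistics are queried from user space.
 */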

#if PF_ALTQ
#define	IFCQ_ALTQ(_ifcq)		(&(_ifcq)->ifcq_altq)
#define	IFCQ_IS_DRAINING(_ifcq)		((_ifcq)->ifcq_drain > 0)
#endif /* PF_ALTQ */

/* ifcq_flags */
#define	IFCQF_READY	0x01		/* ifclassq supports discipline */
#define	IFCQF_ENABLED	0x02		/* ifclassq is in use */
#define	IFCQF_TBR	0x04		/* Token Bucket Regulator is in use */

#define	IFCQ_IS_READY(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_READY)
#define	IFCQ_IS_ENABLED(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define	IFCQ_TBR_IS_ENABLED(_ifcq)	((_ifcq)->ifcq_flags & IFCQF_TBR)

/* classq enqueue return value */
#define	CLASSQEQ_DROPPED	(-1)	/* packet dropped (freed) */
#define	CLASSQEQ_SUCCESS	0	/* success, packet enqueued */
#define	CLASSQEQ_SUCCESS_FC	1	/* packet enqueued; */
					/*   give flow control feedback */
#define	CLASSQEQ_DROPPED_FC	2	/* packet dropped; */
					/*   give flow control feedback */
#define	CLASSQEQ_DROPPED_SP	3	/* packet dropped due to suspension; */
					/*   give flow control feedback */
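
/*
 * Illustrative sketch (not part of this header): one plausible way a
 * caller could fold these values into an errno-style result plus a
 * flow-control advisory flag.  The mapping actually used by the stack
 * lives in the enqueue path, not here.
 *
 *	switch (ret) {
 *	case CLASSQEQ_SUCCESS:
 *		error = 0;
 *		break;
 *	case CLASSQEQ_SUCCESS_FC:
 *		error = 0;		// enqueued, but advise flow control
 *		fc_advisory = TRUE;
 *		break;
 *	case CLASSQEQ_DROPPED:
 *		error = ENOBUFS;	// dropped and freed
 *		break;
 *	case CLASSQEQ_DROPPED_FC:
 *	case CLASSQEQ_DROPPED_SP:
 *		error = ENOBUFS;	// dropped; advise flow control
 *		fc_advisory = TRUE;
 *		break;
 *	}
 */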

/* interface event argument for CLASSQRQ_EVENT */
typedef enum cqev {
	CLASSQ_EV_LINK_SPEED = 1,	/* link speed has changed */
	CLASSQ_EV_LINK_MTU = 2,		/* link MTU has changed */
	CLASSQ_EV_LINK_UP = 3,		/* link is now up */
	CLASSQ_EV_LINK_DOWN = 4,	/* link is now down */
} cqev_t;
#endif /* BSD_KERNEL_PRIVATE */

#include <net/pktsched/pktsched_priq.h>
#include <net/pktsched/pktsched_fairq.h>
#include <net/pktsched/pktsched_tcq.h>
#include <net/pktsched/pktsched_cbq.h>
#include <net/pktsched/pktsched_hfsc.h>
#include <net/pktsched/pktsched_qfq.h>

#ifdef __cplusplus
extern "C" {
#endif

struct if_ifclassq_stats {
	u_int32_t	ifqs_len;
	u_int32_t	ifqs_maxlen;
	struct pktcntr	ifqs_xmitcnt;
	struct pktcntr	ifqs_dropcnt;
	u_int32_t	ifqs_scheduler;
	union {
		struct priq_classstats	ifqs_priq_stats;
		struct fairq_classstats	ifqs_fairq_stats;
		struct tcq_classstats	ifqs_tcq_stats;
		struct cbq_classstats	ifqs_cbq_stats;
		struct hfsc_classstats	ifqs_hfsc_stats;
		struct qfq_classstats	ifqs_qfq_stats;
	};
} __attribute__((aligned(8)));

#ifdef __cplusplus
}
#endif
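
/*
 * Illustrative sketch (not part of this header): user space decides
 * which member of the anonymous union is valid by first checking
 * ifqs_scheduler against the PKTSCHEDT_* scheduler type constants from
 * <net/pktsched/pktsched.h>.  `ifqs' is assumed to have been filled in
 * by the per-interface queue stats export path.
 *
 *	struct if_ifclassq_stats ifqs;
 *	// ... obtain ifqs for a given interface and service class ...
 *	switch (ifqs.ifqs_scheduler) {
 *	case PKTSCHEDT_TCQ:
 *		// interpret ifqs.ifqs_tcq_stats
 *		break;
 *	case PKTSCHEDT_QFQ:
 *		// interpret ifqs.ifqs_qfq_stats
 *		break;
 *	default:
 *		// remaining schedulers follow the same pattern
 *		break;
 *	}
 */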

#ifdef BSD_KERNEL_PRIVATE
/*
 * For ifclassq lock
 */
#define	IFCQ_LOCK_ASSERT_HELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)

#define	IFCQ_LOCK_ASSERT_NOTHELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)

#define	IFCQ_LOCK(_ifcq)						\
	lck_mtx_lock(&(_ifcq)->ifcq_lock)

#define	IFCQ_LOCK_SPIN(_ifcq)						\
	lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)

#define	IFCQ_CONVERT_LOCK(_ifcq) do {					\
	IFCQ_LOCK_ASSERT_HELD(_ifcq);					\
	lck_mtx_convert_spin(&(_ifcq)->ifcq_lock);			\
} while (0)

#define	IFCQ_UNLOCK(_ifcq)						\
	lck_mtx_unlock(&(_ifcq)->ifcq_lock)
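
/*
 * Illustrative sketch (not part of this header): the usual pattern is
 * to take the lock in spin mode on hot paths and convert it to a full
 * mutex before doing anything that may block, such as freeing packets
 * during a purge.  `ifq' is a hypothetical struct ifclassq pointer.
 *
 *	IFCQ_LOCK_SPIN(ifq);
 *	// ... fast-path queue manipulation ...
 *	IFCQ_CONVERT_LOCK(ifq);	// asserts held, converts spin to full mutex
 *	// ... work that may block ...
 *	IFCQ_UNLOCK(ifq);
 */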

/*
 * For ifclassq operations
 */
#define	IFCQ_ENQUEUE(_ifq, _m, _err) do {				\
	(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _m);			\
} while (0)

#define	IFCQ_DEQUEUE(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_REMOVE);		\
} while (0)

#define	IFCQ_DEQUEUE_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_REMOVE);	\
} while (0)

#define	IFCQ_TBR_DEQUEUE(_ifcq, _m) do {				\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_REMOVE);		\
} while (0)

#define	IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _m) do {			\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_REMOVE, _sc);	\
} while (0)

#define	IFCQ_POLL(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_POLL);		\
} while (0)

#define	IFCQ_POLL_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_POLL);	\
} while (0)

#define	IFCQ_TBR_POLL(_ifcq, _m) do {					\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_POLL);		\
} while (0)

#define	IFCQ_TBR_POLL_SC(_ifcq, _sc, _m) do {				\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_POLL, _sc);	\
} while (0)
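
/*
 * Illustrative sketch (not part of this header): a driver may POLL to
 * inspect the next packet without removing it, then DEQUEUE once it
 * commits to transmitting; tbr_lastop in struct tb_regulator exists
 * precisely to keep this poll-and-dequeue sequence consistent.  `ifq',
 * `m' and can_transmit() are hypothetical.
 *
 *	if (IFCQ_TBR_IS_ENABLED(ifq))
 *		IFCQ_TBR_POLL(ifq, m);		// rate-limited peek
 *	else
 *		IFCQ_POLL(ifq, m);		// plain peek
 *	if (m != NULL && can_transmit(m)) {
 *		if (IFCQ_TBR_IS_ENABLED(ifq))
 *			IFCQ_TBR_DEQUEUE(ifq, m);
 *		else
 *			IFCQ_DEQUEUE(ifq, m);
 *	}
 */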

#define	IFCQ_PURGE(_ifq) do {						\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL);	\
} while (0)

#define	IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do {		\
	cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 };			\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req);	\
	(_packets) = _req.packets;					\
	(_bytes) = _req.bytes;						\
} while (0)

#define	IFCQ_UPDATE(_ifq, _ev) do {					\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT,		\
	    (void *)(_ev));						\
} while (0)
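
/*
 * Illustrative sketch (not part of this header): purging every queued
 * packet of one service class and collecting the counters that
 * cqrq_purge_sc_t reports back.  `ifq' is hypothetical; a flow value
 * of 0 means all flows within the class.
 *
 *	u_int32_t pkts, bytes;
 *	IFCQ_LOCK(ifq);
 *	IFCQ_PURGE_SC(ifq, MBUF_SC_BK, 0, pkts, bytes);
 *	IFCQ_UNLOCK(ifq);
 *	// pkts/bytes now hold the amount of traffic discarded
 */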

#define	IFCQ_SET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 1, _level };				\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
} while (0)

#define	IFCQ_GET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF };		\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
	(_level) = _req.level;						\
} while (0)
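
/*
 * Illustrative sketch (not part of this header): setting and then
 * reading back the throttle level; the IFNET_THROTTLE_* levels come
 * from the private net/if_var.h.  `ifq' is hypothetical.
 *
 *	int err;
 *	u_int32_t level;
 *	IFCQ_LOCK(ifq);
 *	IFCQ_SET_THROTTLE(ifq, IFNET_THROTTLE_OPPORTUNISTIC, err);
 *	IFCQ_GET_THROTTLE(ifq, level, err);
 *	IFCQ_UNLOCK(ifq);
 */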

#define	IFCQ_LEN(_ifcq)		((_ifcq)->ifcq_len)
#define	IFCQ_QFULL(_ifcq)	(IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define	IFCQ_IS_EMPTY(_ifcq)	(IFCQ_LEN(_ifcq) == 0)
#define	IFCQ_INC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)++)
#define	IFCQ_DEC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)--)
#define	IFCQ_MAXLEN(_ifcq)	((_ifcq)->ifcq_maxlen)
#define	IFCQ_SET_MAXLEN(_ifcq, _len)	((_ifcq)->ifcq_maxlen = (_len))

#define	IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len);		\
} while (0)

#define	IFCQ_DROP_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len);		\
} while (0)

extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern u_int32_t ifclassq_get_len(struct ifclassq *);
extern errno_t ifclassq_enqueue(struct ifclassq *, struct mbuf *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, struct mbuf **,
    struct mbuf **, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern struct mbuf *ifclassq_poll(struct ifclassq *);
extern struct mbuf *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t);
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
    ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
    ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
    void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *, boolean_t);
extern struct mbuf *ifclassq_tbr_dequeue(struct ifclassq *, int);
extern struct mbuf *ifclassq_tbr_dequeue_sc(struct ifclassq *, int,
    mbuf_svc_class_t);
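
/*
 * Illustrative sketch (not part of this header): a scheduler wires
 * itself into an ifclassq by registering its callbacks, and a token
 * bucket regulator is installed via a tb_profile.  The my_* callbacks,
 * `tcq_state' and the 100 Mbps / 8 KB profile are hypothetical.
 *
 *	err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tcq_state,
 *	    my_enqueue, my_dequeue, my_dequeue_sc, my_request);
 *
 *	struct tb_profile tb = { 100 * 1000 * 1000, 0, 8 * 1024 };
 *	IFCQ_LOCK(ifq);
 *	err = ifclassq_tbr_set(ifq, &tb, TRUE);
 *	IFCQ_UNLOCK(ifq);
 */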
#endif /* BSD_KERNEL_PRIVATE */
#endif /* PRIVATE */
#endif /* _NET_CLASSQ_IF_CLASSQ_H_ */