/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _NET_CLASSQ_IF_CLASSQ_H_
#define _NET_CLASSQ_IF_CLASSQ_H_

#define IFCQ_SC_MAX 10 /* max number of queues */

#ifdef BSD_KERNEL_PRIVATE
#include <net/classq/classq.h>

/* classq dequeue op arg */
typedef enum cqdq_op {
	CLASSQDQ_REMOVE = 1,	/* dequeue mbuf from the queue */
	CLASSQDQ_POLL = 2,	/* don't dequeue mbuf from the queue */
} cqdq_op_t;

/* classq request types */
typedef enum cqrq {
	CLASSQRQ_PURGE = 1,	/* purge all packets */
	CLASSQRQ_PURGE_SC = 2,	/* purge service class (and flow) */
	CLASSQRQ_EVENT = 3,	/* interface events */
	CLASSQRQ_THROTTLE = 4,	/* throttle packets */
	CLASSQRQ_STAT_SC = 5,	/* get service class queue stats */
} cqrq_t;

/* classq purge_sc request argument */
typedef struct cqrq_purge_sc {
	mbuf_svc_class_t sc;	/* (in) service class */
	u_int32_t flow;		/* (in) 0 means all flows */
	u_int32_t packets;	/* (out) purged packets */
	u_int32_t bytes;	/* (out) purged bytes */
} cqrq_purge_sc_t;

/* classq throttle request argument */
typedef struct cqrq_throttle {
	u_int32_t set;		/* set or get */
	u_int32_t level;	/* (in/out) throttling level */
} cqrq_throttle_t;

/* classq service class stats request argument */
typedef struct cqrq_stat_sc {
	mbuf_svc_class_t sc;	/* (in) service class */
	u_int32_t packets;	/* (out) packets enqueued */
	u_int32_t bytes;	/* (out) bytes enqueued */
} cqrq_stat_sc_t;
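
/*
 * Example (illustrative sketch, not part of this header): a scheduler's
 * request handler typically switches on the request type and interprets
 * the opaque argument according to the cqrq_* typedefs above.  The
 * example_* names below are hypothetical.
 *
 *	static int
 *	example_request(struct ifclassq *ifq, enum cqrq rq, void *arg)
 *	{
 *		switch (rq) {
 *		case CLASSQRQ_PURGE:
 *			example_purge_all(ifq);
 *			return (0);
 *		case CLASSQRQ_THROTTLE: {
 *			cqrq_throttle_t *tr = arg;
 *			if (tr->set)
 *				example_set_level(ifq, tr->level);
 *			else
 *				tr->level = example_get_level(ifq);
 *			return (0);
 *		}
 *		default:
 *			return (ENOTSUP);
 *		}
 *	}
 */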

#include <net/altq/if_altq.h>

/*
 * A token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.  Modern cards are able to buffer
 * a large amount of packets and dequeue too many packets at a time.  This
 * bursty dequeue behavior makes it impossible to schedule packets by
 * queueing disciplines.  A token-bucket is used to control the burst size
 * in a device independent manner.
 */
struct tb_regulator {
	u_int64_t tbr_rate_raw;	/* (unscaled) token bucket rate */
	u_int32_t tbr_percent;	/* token bucket rate in percentage */
	int64_t tbr_rate;	/* (scaled) token bucket rate */
	int64_t tbr_depth;	/* (scaled) token bucket depth */

	int64_t tbr_token;	/* (scaled) current token */
	int64_t tbr_filluptime;	/* (scaled) time to fill up bucket */
	u_int64_t tbr_last;	/* last time token was updated */

	int tbr_lastop;		/* last dequeue operation type */
				/* (needed for poll-and-dequeue) */
};

/* simple token bucket meter profile */
struct tb_profile {
	u_int64_t rate;		/* rate in bits per second */
	u_int32_t percent;	/* rate in percentage */
	u_int32_t depth;	/* depth in bytes */
};
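
/*
 * Worked example (illustrative): with rate = 100000000 (100 Mbit/s) and
 * depth = 3000 bytes (two 1500-byte frames), the regulator permits a
 * burst of at most two full-sized packets, and an empty bucket refills
 * in depth * 8 / rate = 24000 / 100000000 sec = 240 microseconds, which
 * bounds how soon the driver may dequeue again.
 */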

struct ifclassq;

typedef int (*ifclassq_enq_func)(struct ifclassq *, struct mbuf *);
typedef struct mbuf *(*ifclassq_deq_func)(struct ifclassq *, enum cqdq_op);
typedef struct mbuf *(*ifclassq_deq_sc_func)(struct ifclassq *,
    mbuf_svc_class_t, enum cqdq_op);
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);
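
/*
 * Example (sketch): a packet scheduler supplies one function of each
 * type above when it attaches to the interface queue; see
 * ifclassq_attach() below.  The example_* callbacks and the value of
 * the qtype argument are hypothetical.
 *
 *	static int
 *	example_setup(struct ifclassq *ifq, void *disc, u_int32_t qtype)
 *	{
 *		return (ifclassq_attach(ifq, qtype, disc,
 *		    example_enqueue, example_dequeue, example_dequeue_sc,
 *		    example_request));
 *	}
 */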

/*
 * Structure defining a queue for a network interface.
 */
struct ifclassq {
	decl_lck_mtx_data(, ifcq_lock);

	struct ifnet *ifcq_ifp;	/* back pointer to interface */
	u_int32_t ifcq_len;
	u_int32_t ifcq_maxlen;
	struct pktcntr ifcq_xmitcnt;
	struct pktcntr ifcq_dropcnt;

	u_int32_t ifcq_type;	/* scheduler type */
	u_int32_t ifcq_flags;	/* flags */
	u_int32_t ifcq_sflags;	/* scheduler flags */
	u_int32_t ifcq_target_qdelay; /* target queue delay */
	void *ifcq_disc;	/* for scheduler-specific use */
	/*
	 * ifcq_disc_slots[] represents the leaf classes configured for the
	 * corresponding discipline/scheduler, ordered by their corresponding
	 * service class index.  Each slot holds the queue ID used to identify
	 * the class instance, as well as the class instance pointer itself.
	 * The latter is used during enqueue and dequeue in order to avoid the
	 * costs associated with looking up the class pointer based on the
	 * queue ID.  The queue ID is used when querying the statistics from
	 * userland.
	 *
	 * Avoiding the use of queue ID during enqueue and dequeue is made
	 * possible by virtue of knowing the particular mbuf service class
	 * associated with the packets.  The service class index of the
	 * packet is used as the index to ifcq_disc_slots[].
	 *
	 * ifcq_disc_slots[] therefore also acts as a lookup table which
	 * provides for the mapping between MBUF_SC values and the actual
	 * scheduler classes.
	 */
	struct ifclassq_disc_slot {
		u_int32_t qid;	/* queue ID of the class */
		void *cl;	/* class instance pointer */
	} ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */

	ifclassq_enq_func ifcq_enqueue;
	ifclassq_deq_func ifcq_dequeue;
	ifclassq_deq_sc_func ifcq_dequeue_sc;
	ifclassq_req_func ifcq_request;

	/* token bucket regulator */
	struct tb_regulator ifcq_tbr;	/* TBR */

	u_int32_t ifcq_drain;
	struct ifaltq ifcq_altq;
};
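
/*
 * Example (sketch of the fast path described above): enqueue and dequeue
 * can map a packet's service class directly to its slot, avoiding any
 * queue-ID lookup.  MBUF_SCIDX() is assumed here to be the service-class
 * to index mapping provided by the mbuf headers.
 *
 *	static void *
 *	example_class_for_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
 *	{
 *		return (ifq->ifcq_disc_slots[MBUF_SCIDX(sc)].cl);
 *	}
 */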

#define IFCQ_ALTQ(_ifcq) (&(_ifcq)->ifcq_altq)
#define IFCQ_IS_DRAINING(_ifcq) ((_ifcq)->ifcq_drain > 0)

#define IFCQF_READY 0x01	/* ifclassq supports discipline */
#define IFCQF_ENABLED 0x02	/* ifclassq is in use */
#define IFCQF_TBR 0x04		/* Token Bucket Regulator is in use */

#define IFCQ_IS_READY(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_READY)
#define IFCQ_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define IFCQ_TBR_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_TBR)

/* classq enqueue return value */
#define CLASSQEQ_DROPPED (-1)	/* packet dropped (freed) */
#define CLASSQEQ_SUCCESS 0	/* success, packet enqueued */
#define CLASSQEQ_SUCCESS_FC 1	/* packet enqueued; */
				/* give flow control feedback */
#define CLASSQEQ_DROPPED_FC 2	/* packet dropped; */
				/* give flow control feedback */
#define CLASSQEQ_DROPPED_SP 3	/* packet dropped due to suspension; */
				/* give flow control feedback */
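
/*
 * Example (illustrative): a caller of IFCQ_ENQUEUE() (defined below)
 * treats the _FC and _SP values as a cue to assert flow control on the
 * originating flow; note that for the CLASSQEQ_DROPPED* values the mbuf
 * has already been freed.  The helper below is hypothetical.
 *
 *	static boolean_t
 *	example_enqueue_needs_fc(struct ifclassq *ifq, struct mbuf *m)
 *	{
 *		int err;
 *
 *		IFCQ_ENQUEUE(ifq, m, err);
 *		return (err == CLASSQEQ_SUCCESS_FC ||
 *		    err == CLASSQEQ_DROPPED_FC ||
 *		    err == CLASSQEQ_DROPPED_SP);
 *	}
 */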

/* interface event argument for CLASSQRQ_EVENT */
typedef enum cqev {
	CLASSQ_EV_LINK_BANDWIDTH = 1,	/* link bandwidth has changed */
	CLASSQ_EV_LINK_LATENCY = 2,	/* link latency has changed */
	CLASSQ_EV_LINK_MTU = 3,		/* link MTU has changed */
	CLASSQ_EV_LINK_UP = 4,		/* link is now up */
	CLASSQ_EV_LINK_DOWN = 5,	/* link is now down */
} cqev_t;

#endif /* BSD_KERNEL_PRIVATE */

#include <net/pktsched/pktsched_priq.h>
#include <net/pktsched/pktsched_fairq.h>
#include <net/pktsched/pktsched_tcq.h>
#include <net/pktsched/pktsched_cbq.h>
#include <net/pktsched/pktsched_hfsc.h>
#include <net/pktsched/pktsched_qfq.h>

struct if_ifclassq_stats {
	u_int32_t ifqs_len;
	u_int32_t ifqs_maxlen;
	struct pktcntr ifqs_xmitcnt;
	struct pktcntr ifqs_dropcnt;
	u_int32_t ifqs_scheduler;
	union {
		struct priq_classstats ifqs_priq_stats;
		struct fairq_classstats ifqs_fairq_stats;
		struct tcq_classstats ifqs_tcq_stats;
		struct cbq_classstats ifqs_cbq_stats;
		struct hfsc_classstats ifqs_hfsc_stats;
		struct qfq_classstats ifqs_qfq_stats;
	};
} __attribute__((aligned(8)));
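
/*
 * Example (illustrative): ifqs_scheduler identifies which union member
 * is valid.  PKTSCHEDT_TCQ is assumed to be the matching scheduler type
 * constant from <net/pktsched/pktsched.h>, and example_dump_tcq() is a
 * hypothetical consumer of the TCQ counters.
 *
 *	static void
 *	example_inspect(struct if_ifclassq_stats *ifqs)
 *	{
 *		if (ifqs->ifqs_scheduler == PKTSCHEDT_TCQ)
 *			example_dump_tcq(&ifqs->ifqs_tcq_stats);
 *	}
 */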

#ifdef BSD_KERNEL_PRIVATE
/*
 * For ifclassq lock
 */
#define IFCQ_LOCK_ASSERT_HELD(_ifcq) \
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)

#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq) \
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)

#define IFCQ_LOCK(_ifcq) \
	lck_mtx_lock(&(_ifcq)->ifcq_lock)

#define IFCQ_LOCK_SPIN(_ifcq) \
	lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)

#define IFCQ_CONVERT_LOCK(_ifcq) do { \
	IFCQ_LOCK_ASSERT_HELD(_ifcq); \
	lck_mtx_convert_spin(&(_ifcq)->ifcq_lock); \
} while (0)

#define IFCQ_UNLOCK(_ifcq) \
	lck_mtx_unlock(&(_ifcq)->ifcq_lock)

/*
 * For ifclassq operations
 */
#define IFCQ_ENQUEUE(_ifq, _m, _err) do { \
	(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _m); \
} while (0)

#define IFCQ_DEQUEUE(_ifq, _m) do { \
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_REMOVE); \
} while (0)

#define IFCQ_DEQUEUE_SC(_ifq, _sc, _m) do { \
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_REMOVE); \
} while (0)

#define IFCQ_TBR_DEQUEUE(_ifcq, _m) do { \
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_REMOVE); \
} while (0)

#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _m) do { \
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_REMOVE, _sc); \
} while (0)

#define IFCQ_POLL(_ifq, _m) do { \
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_POLL); \
} while (0)

#define IFCQ_POLL_SC(_ifq, _sc, _m) do { \
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_POLL); \
} while (0)

#define IFCQ_TBR_POLL(_ifcq, _m) do { \
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_POLL); \
} while (0)

#define IFCQ_TBR_POLL_SC(_ifcq, _sc, _m) do { \
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_POLL, _sc); \
} while (0)
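
/*
 * Example (illustrative poll-and-dequeue pattern): poll first to inspect
 * the next packet without removing it, then commit the dequeue only when
 * there is room for it; the TBR records tbr_lastop to keep such a pair
 * consistent.  The helper below is a hypothetical sketch that assumes the
 * ifclassq lock must be held around these macros.
 *
 *	static struct mbuf *
 *	example_next_packet(struct ifclassq *ifq, boolean_t have_room)
 *	{
 *		struct mbuf *m;
 *
 *		IFCQ_LOCK(ifq);
 *		IFCQ_POLL(ifq, m);
 *		if (m != NULL && have_room)
 *			IFCQ_DEQUEUE(ifq, m);
 *		else
 *			m = NULL;
 *		IFCQ_UNLOCK(ifq);
 *		return (m);
 *	}
 */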

#define IFCQ_PURGE(_ifq) do { \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL); \
} while (0)

#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do { \
	cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 }; \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \
	(_packets) = _req.packets; \
	(_bytes) = _req.bytes; \
} while (0)

#define IFCQ_UPDATE(_ifq, _ev) do { \
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT, \
	    (void *)(uintptr_t)(_ev)); \
} while (0)

#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do { \
	cqrq_throttle_t _req = { 1, _level }; \
	(_err) = (*(_ifq)->ifcq_request) \
	    (_ifq, CLASSQRQ_THROTTLE, &_req); \
} while (0)

#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do { \
	cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF }; \
	(_err) = (*(_ifq)->ifcq_request) \
	    (_ifq, CLASSQRQ_THROTTLE, &_req); \
	(_level) = _req.level; \
} while (0)
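
/*
 * Example (illustrative): setting and then reading back the throttling
 * level with the queue locked.  IFNET_THROTTLE_OPPORTUNISTIC is assumed
 * to be one of the ifnet throttling levels alongside IFNET_THROTTLE_OFF.
 *
 *	u_int32_t level;
 *	int err;
 *
 *	IFCQ_LOCK(ifq);
 *	IFCQ_SET_THROTTLE(ifq, IFNET_THROTTLE_OPPORTUNISTIC, err);
 *	if (err == 0)
 *		IFCQ_GET_THROTTLE(ifq, level, err);
 *	IFCQ_UNLOCK(ifq);
 */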

#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do { \
	cqrq_stat_sc_t _req = { _sc, 0, 0 }; \
	(_err) = (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \
	if ((_packets) != NULL) \
		(*(_packets)) = _req.packets; \
	if ((_bytes) != NULL) \
		(*(_bytes)) = _req.bytes; \
} while (0)

#define IFCQ_LEN(_ifcq) ((_ifcq)->ifcq_len)
#define IFCQ_QFULL(_ifcq) (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define IFCQ_IS_EMPTY(_ifcq) (IFCQ_LEN(_ifcq) == 0)
#define IFCQ_INC_LEN(_ifcq) (IFCQ_LEN(_ifcq)++)
#define IFCQ_DEC_LEN(_ifcq) (IFCQ_LEN(_ifcq)--)
#define IFCQ_MAXLEN(_ifcq) ((_ifcq)->ifcq_maxlen)
#define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len))
#define IFCQ_TARGET_QDELAY(_ifcq) ((_ifcq)->ifcq_target_qdelay)

#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do { \
	PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len); \
} while (0)

#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do { \
	PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len); \
} while (0)

extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t *, u_int32_t *);
extern errno_t ifclassq_enqueue(struct ifclassq *, struct mbuf *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, struct mbuf **,
    struct mbuf **, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern struct mbuf *ifclassq_poll(struct ifclassq *);
extern struct mbuf *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t);
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
    ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
    ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
    void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *, boolean_t);
extern struct mbuf *ifclassq_tbr_dequeue(struct ifclassq *, int);
extern struct mbuf *ifclassq_tbr_dequeue_sc(struct ifclassq *, int,
    mbuf_svc_class_t);
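
/*
 * Example (illustrative): draining a batch of packets through the public
 * API, assuming the u_int32_t argument of ifclassq_dequeue() is the
 * maximum number of packets to dequeue.  The dequeued chain spans
 * head..tail, with its packet and byte counts returned through the last
 * two arguments; the caller and the limit of 32 are hypothetical.
 *
 *	static errno_t
 *	example_drain(struct ifclassq *ifq)
 *	{
 *		struct mbuf *head, *tail;
 *		u_int32_t cnt, len;
 *
 *		return (ifclassq_dequeue(ifq, 32, &head, &tail, &cnt, &len));
 *	}
 */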

#endif /* BSD_KERNEL_PRIVATE */
#endif /* _NET_CLASSQ_IF_CLASSQ_H_ */