/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _NET_CLASSQ_IF_CLASSQ_H_
#define	_NET_CLASSQ_IF_CLASSQ_H_

#define	IFCQ_SC_MAX	10		/* max number of queues */

#ifdef BSD_KERNEL_PRIVATE
#include <net/classq/classq.h>

/* classq dequeue op arg */
typedef enum cqdq_op {
	CLASSQDQ_REMOVE = 1,	/* dequeue mbuf from the queue */
	CLASSQDQ_POLL = 2,	/* don't dequeue mbuf from the queue */
} cqdq_op_t;

/* classq request types */
typedef enum cqrq {
	CLASSQRQ_PURGE = 1,		/* purge all packets */
	CLASSQRQ_PURGE_SC = 2,		/* purge service class (and flow) */
	CLASSQRQ_EVENT = 3,		/* interface events */
	CLASSQRQ_THROTTLE = 4,		/* throttle packets */
	CLASSQRQ_STAT_SC = 5,		/* get service class queue stats */
} cqrq_t;

/* classq purge_sc request argument */
typedef struct cqrq_purge_sc {
	mbuf_svc_class_t	sc;	/* (in) service class */
	u_int32_t		flow;	/* (in) 0 means all flows */
	u_int32_t		packets; /* (out) purged packets */
	u_int32_t		bytes;	/* (out) purged bytes */
} cqrq_purge_sc_t;

/* classq throttle request argument */
typedef struct cqrq_throttle {
	u_int32_t		set;	/* set or get */
	u_int32_t		level;	/* (in/out) throttling level */
} cqrq_throttle_t;

/* classq service class stats request argument */
typedef struct cqrq_stat_sc {
	mbuf_svc_class_t	sc;	/* (in) service class */
	u_int32_t		packets; /* (out) packets enqueued */
	u_int32_t		bytes;	/* (out) bytes enqueued */
} cqrq_stat_sc_t;

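/*
 * Illustrative sketch (not part of the original header): a scheduler's
 * request handler typically dispatches on the request type and fills in
 * the (out) members of the argument structures above.  The sched_*
 * callees below are hypothetical.
 *
 *	static int
 *	sched_request(struct ifclassq *ifq, enum cqrq req, void *arg)
 *	{
 *		switch (req) {
 *		case CLASSQRQ_PURGE:
 *			return (sched_purge_all(ifq));
 *		case CLASSQRQ_THROTTLE: {
 *			cqrq_throttle_t *tr = arg;
 *			if (tr->set)
 *				return (sched_set_throttle(ifq, tr->level));
 *			tr->level = sched_get_throttle(ifq);
 *			return (0);
 *		}
 *		default:
 *			return (ENOTSUP);
 *		}
 *	}
 */
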
#if PF_ALTQ
#include <net/altq/if_altq.h>
#endif /* PF_ALTQ */

/*
 * A token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.  Modern cards are able to buffer
 * a large amount of packets and dequeue too many packets at a time.  This
 * bursty dequeue behavior makes it impossible to schedule packets by
 * queueing disciplines.  A token-bucket is used to control the burst size
 * in a device independent manner.
 */
struct tb_regulator {
	u_int64_t	tbr_rate_raw;	/* (unscaled) token bucket rate */
	u_int32_t	tbr_percent;	/* token bucket rate in percentage */
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	u_int64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type */
					/*   needed for poll-and-dequeue */
};

/* simple token bucket meter profile */
struct tb_profile {
	u_int64_t	rate;	/* rate in bit-per-sec */
	u_int32_t	percent; /* rate in percentage */
	u_int32_t	depth;	/* depth in bytes */
};

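/*
 * Sketch of the token-bucket arithmetic (illustrative only; the actual
 * logic lives in ifclassq_tbr_dequeue()).  Tokens accrue at the scaled
 * rate and are capped at the bucket depth; a dequeue is permitted only
 * while the bucket holds a positive token count, which bounds the burst
 * handed to the driver to roughly tbr_depth worth of bytes:
 *
 *	interval = now - tbr->tbr_last;
 *	if (interval >= tbr->tbr_filluptime)
 *		tbr->tbr_token = tbr->tbr_depth;
 *	else {
 *		tbr->tbr_token += interval * tbr->tbr_rate;
 *		if (tbr->tbr_token > tbr->tbr_depth)
 *			tbr->tbr_token = tbr->tbr_depth;
 *	}
 *	tbr->tbr_last = now;
 *	if (tbr->tbr_token <= 0)
 *		return (NULL);
 *	(dequeue the packet and charge its scaled length)
 *	tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
 */
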
struct ifclassq;

typedef int (*ifclassq_enq_func)(struct ifclassq *, struct mbuf *);
typedef struct mbuf *(*ifclassq_deq_func)(struct ifclassq *, enum cqdq_op);
typedef struct mbuf *(*ifclassq_deq_sc_func)(struct ifclassq *,
    mbuf_svc_class_t, enum cqdq_op);
typedef int (*ifclassq_deq_multi_func)(struct ifclassq *, enum cqdq_op,
    u_int32_t, u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *,
    u_int32_t *);
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);

/*
 * Structure defining a queue for a network interface.
 */
struct ifclassq {
	decl_lck_mtx_data(, ifcq_lock);

	struct ifnet	*ifcq_ifp;	/* back pointer to interface */
	u_int32_t	ifcq_len;	/* packet count */
	u_int32_t	ifcq_maxlen;
	struct pktcntr	ifcq_xmitcnt;
	struct pktcntr	ifcq_dropcnt;

	u_int32_t	ifcq_type;	/* scheduler type */
	u_int32_t	ifcq_flags;	/* flags */
	u_int32_t	ifcq_sflags;	/* scheduler flags */
	u_int32_t	ifcq_target_qdelay; /* target queue delay */
	u_int32_t	ifcq_bytes;	/* bytes count */
	void		*ifcq_disc;	/* for scheduler-specific use */
	/*
	 * ifcq_disc_slots[] represents the leaf classes configured for the
	 * corresponding discipline/scheduler, ordered by their corresponding
	 * service class index.  Each slot holds the queue ID used to identify
	 * the class instance, as well as the class instance pointer itself.
	 * The latter is used during enqueue and dequeue in order to avoid the
	 * costs associated with looking up the class pointer based on the
	 * queue ID.  The queue ID is used when querying the statistics from
	 * user space.
	 *
	 * Avoiding the use of queue ID during enqueue and dequeue is made
	 * possible by virtue of knowing the particular mbuf service class
	 * associated with the packets.  The service class index of the
	 * packet is used as the index to ifcq_disc_slots[].
	 *
	 * ifcq_disc_slots[] therefore also acts as a lookup table which
	 * provides for the mapping between MBUF_SC values and the actual
	 * scheduler classes.
	 */
	struct ifclassq_disc_slot {
		u_int32_t	qid;	/* queue ID of the class instance */
		void		*cl;	/* class instance pointer */
	} ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */

	ifclassq_enq_func	ifcq_enqueue;
	ifclassq_deq_func	ifcq_dequeue;
	ifclassq_deq_sc_func	ifcq_dequeue_sc;
	ifclassq_deq_multi_func	ifcq_dequeue_multi;
	ifclassq_req_func	ifcq_request;

	/* token bucket regulator */
	struct tb_regulator	ifcq_tbr;	/* TBR */

#if PF_ALTQ
	u_int32_t	ifcq_drain;
	struct ifaltq	ifcq_altq;
#endif /* PF_ALTQ */
};

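/*
 * Example of the slot lookup described above (illustrative sketch; the
 * exact enqueue path differs).  Because the mbuf carries its service
 * class, the class pointer is found with a single array index instead
 * of a queue-ID search; MBUF_SCIDX() maps an MBUF_SC value to its
 * service class index:
 *
 *	u_int32_t idx = MBUF_SCIDX(m_get_service_class(m));
 *	void *cl = ifq->ifcq_disc_slots[idx].cl;
 */
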
#if PF_ALTQ
#define	IFCQ_ALTQ(_ifcq)	(&(_ifcq)->ifcq_altq)
#define	IFCQ_IS_DRAINING(_ifcq)	((_ifcq)->ifcq_drain > 0)
#endif /* PF_ALTQ */

/* ifcq_flags */
#define	IFCQF_READY	0x01	/* ifclassq supports discipline */
#define	IFCQF_ENABLED	0x02	/* ifclassq is in use */
#define	IFCQF_TBR	0x04	/* Token Bucket Regulator is in use */

#define	IFCQ_IS_READY(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_READY)
#define	IFCQ_IS_ENABLED(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define	IFCQ_TBR_IS_ENABLED(_ifcq)	((_ifcq)->ifcq_flags & IFCQF_TBR)

/* classq enqueue return value */
#define	CLASSQEQ_DROPPED	(-1)	/* packet dropped (freed) */
#define	CLASSQEQ_SUCCESS	0	/* success, packet enqueued */
#define	CLASSQEQ_SUCCESS_FC	1	/* packet enqueued; */
					/*   give flow control feedback */
#define	CLASSQEQ_DROPPED_FC	2	/* packet dropped; */
					/*   give flow control feedback */
#define	CLASSQEQ_DROPPED_SP	3	/* packet dropped due to suspension; */
					/*   give flow control feedback */

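/*
 * A caller of IFCQ_ENQUEUE() might translate these values along the
 * following lines (hypothetical sketch; the errno mapping shown is
 * illustrative, not a contract of this header):
 *
 *	IFCQ_ENQUEUE(ifq, m, err);
 *	switch (err) {
 *	case CLASSQEQ_SUCCESS:
 *		err = 0;
 *		break;
 *	case CLASSQEQ_SUCCESS_FC:
 *		err = 0;	(queued; assert flow control on the flow)
 *		break;
 *	case CLASSQEQ_DROPPED:
 *	case CLASSQEQ_DROPPED_FC:
 *	case CLASSQEQ_DROPPED_SP:
 *		err = ENOBUFS;	(mbuf has already been freed)
 *		break;
 *	}
 */
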
/* interface event argument for CLASSQRQ_EVENT */
typedef enum cqev {
	CLASSQ_EV_LINK_BANDWIDTH = 1,	/* link bandwidth has changed */
	CLASSQ_EV_LINK_LATENCY = 2,	/* link latency has changed */
	CLASSQ_EV_LINK_MTU = 3,		/* link MTU has changed */
	CLASSQ_EV_LINK_UP = 4,		/* link is now up */
	CLASSQ_EV_LINK_DOWN = 5,	/* link is now down */
} cqev_t;
#endif /* BSD_KERNEL_PRIVATE */

#include <net/pktsched/pktsched_priq.h>
#include <net/pktsched/pktsched_fairq.h>
#include <net/pktsched/pktsched_tcq.h>
#include <net/pktsched/pktsched_cbq.h>
#include <net/pktsched/pktsched_hfsc.h>
#include <net/pktsched/pktsched_qfq.h>
#include <net/pktsched/pktsched_fq_codel.h>

struct if_ifclassq_stats {
	u_int32_t	ifqs_len;
	u_int32_t	ifqs_maxlen;
	struct pktcntr	ifqs_xmitcnt;
	struct pktcntr	ifqs_dropcnt;
	u_int32_t	ifqs_scheduler;
	union {
		struct priq_classstats		ifqs_priq_stats;
		struct fairq_classstats		ifqs_fairq_stats;
		struct tcq_classstats		ifqs_tcq_stats;
		struct cbq_classstats		ifqs_cbq_stats;
		struct hfsc_classstats		ifqs_hfsc_stats;
		struct qfq_classstats		ifqs_qfq_stats;
		struct fq_codel_classstats	ifqs_fq_codel_stats;
	};
} __attribute__((aligned(8)));

#ifdef BSD_KERNEL_PRIVATE

#define	IFCQ_LOCK_ASSERT_HELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)

#define	IFCQ_LOCK_ASSERT_NOTHELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)

#define	IFCQ_LOCK(_ifcq)						\
	lck_mtx_lock(&(_ifcq)->ifcq_lock)

#define	IFCQ_LOCK_SPIN(_ifcq)						\
	lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)

#define	IFCQ_CONVERT_LOCK(_ifcq) do {					\
	IFCQ_LOCK_ASSERT_HELD(_ifcq);					\
	lck_mtx_convert_spin(&(_ifcq)->ifcq_lock);			\
} while (0)

#define	IFCQ_UNLOCK(_ifcq)						\
	lck_mtx_unlock(&(_ifcq)->ifcq_lock)

/*
 * For ifclassq operations
 */
#define	IFCQ_ENQUEUE(_ifq, _m, _err) do {				\
	(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _m);			\
} while (0)

#define	IFCQ_DEQUEUE(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_REMOVE);		\
} while (0)

#define	IFCQ_DEQUEUE_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_REMOVE);	\
} while (0)

#define	IFCQ_TBR_DEQUEUE(_ifcq, _m) do {				\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_REMOVE);		\
} while (0)

#define	IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _m) do {			\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_REMOVE, _sc);	\
} while (0)

#define	IFCQ_POLL(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_POLL);		\
} while (0)

#define	IFCQ_POLL_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_POLL);	\
} while (0)

#define	IFCQ_TBR_POLL(_ifcq, _m) do {					\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_POLL);		\
} while (0)

#define	IFCQ_TBR_POLL_SC(_ifcq, _sc, _m) do {				\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_POLL, _sc);	\
} while (0)

#define	IFCQ_PURGE(_ifq) do {						\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL);	\
} while (0)

#define	IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do {		\
	cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 };			\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req);	\
	(_packets) = _req.packets;					\
	(_bytes) = _req.bytes;						\
} while (0)

#define	IFCQ_UPDATE(_ifq, _ev) do {					\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT,		\
	    (void *)(_ev));						\
} while (0)

#define	IFCQ_SET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 1, _level };				\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
} while (0)

#define	IFCQ_GET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF };		\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
	(_level) = _req.level;						\
} while (0)

#define	IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do {		\
	cqrq_stat_sc_t _req = { _sc, 0, 0 };				\
	(_err) = (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \
	if ((_packets) != NULL)						\
		(*(_packets)) = _req.packets;				\
	if ((_bytes) != NULL)						\
		(*(_bytes)) = _req.bytes;				\
} while (0)

#define	IFCQ_LEN(_ifcq)		((_ifcq)->ifcq_len)
#define	IFCQ_QFULL(_ifcq)	(IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define	IFCQ_IS_EMPTY(_ifcq)	(IFCQ_LEN(_ifcq) == 0)
#define	IFCQ_INC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)++)
#define	IFCQ_DEC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)--)
#define	IFCQ_MAXLEN(_ifcq)	((_ifcq)->ifcq_maxlen)
#define	IFCQ_SET_MAXLEN(_ifcq, _len)	((_ifcq)->ifcq_maxlen = (_len))
#define	IFCQ_TARGET_QDELAY(_ifcq)	((_ifcq)->ifcq_target_qdelay)
#define	IFCQ_BYTES(_ifcq)	((_ifcq)->ifcq_bytes)
#define	IFCQ_INC_BYTES(_ifcq, _len)					\
	((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes + (_len))
#define	IFCQ_DEC_BYTES(_ifcq, _len)					\
	((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes - (_len))

#define	IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len);		\
} while (0)

#define	IFCQ_DROP_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len);		\
} while (0)

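/*
 * Typical transmit-path usage of the dequeue macros (illustrative
 * sketch of a hypothetical driver start routine; the classq lock must
 * be held across the dequeue, as the macros do not take it):
 *
 *	struct mbuf *m;
 *
 *	IFCQ_LOCK_SPIN(ifq);
 *	if (IFCQ_TBR_IS_ENABLED(ifq))
 *		IFCQ_TBR_DEQUEUE(ifq, m);
 *	else
 *		IFCQ_DEQUEUE(ifq, m);
 *	IFCQ_UNLOCK(ifq);
 *	if (m != NULL)
 *		(hand m to the hardware)
 */
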
extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t *, u_int32_t *);
extern errno_t ifclassq_enqueue(struct ifclassq *, struct mbuf *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, u_int32_t,
    struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern struct mbuf *ifclassq_poll(struct ifclassq *);
extern struct mbuf *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t);
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
    ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
    ifclassq_deq_multi_func, ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
    void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *, boolean_t);
extern struct mbuf *ifclassq_tbr_dequeue(struct ifclassq *, int);
extern struct mbuf *ifclassq_tbr_dequeue_sc(struct ifclassq *, int,
    mbuf_svc_class_t);
extern void ifclassq_calc_target_qdelay(struct ifnet *ifp,
    u_int64_t *if_target_qdelay);
extern void ifclassq_calc_update_interval(u_int64_t *update_interval);

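/*
 * Example: capping an interface at 100 Mbit/s with a 16 KB bucket
 * (hypothetical usage sketch; assumes the caller already holds the
 * classq lock, and that the boolean_t argument is used here to request
 * an immediate profile update):
 *
 *	struct tb_profile tb = { 100 * 1000 * 1000, 0, 16 * 1024 };
 *	int err = ifclassq_tbr_set(ifq, &tb, TRUE);
 */
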
#endif /* BSD_KERNEL_PRIVATE */

#endif /* _NET_CLASSQ_IF_CLASSQ_H_ */