/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _NET_CLASSQ_IF_CLASSQ_H_
#define _NET_CLASSQ_IF_CLASSQ_H_

#define IFCQ_SC_MAX		10		/* max number of queues */
#ifdef BSD_KERNEL_PRIVATE
#include <net/classq/classq.h>

/* classq dequeue op arg */
typedef enum cqdq_op {
	CLASSQDQ_REMOVE = 1,	/* dequeue mbuf from the queue */
	CLASSQDQ_POLL = 2,	/* don't dequeue mbuf from the queue */
} cqdq_op_t;
/* classq request types */
typedef enum cqrq {
	CLASSQRQ_PURGE = 1,		/* purge all packets */
	CLASSQRQ_PURGE_SC = 2,		/* purge service class (and flow) */
	CLASSQRQ_EVENT = 3,		/* interface events */
	CLASSQRQ_THROTTLE = 4,		/* throttle packets */
	CLASSQRQ_STAT_SC = 5,		/* get service class queue stats */
} cqrq_t;
/* classq purge_sc request argument */
typedef struct cqrq_purge_sc {
	mbuf_svc_class_t	sc;	/* (in) service class */
	u_int32_t		flow;	/* (in) 0 means all flows */
	u_int32_t		packets; /* (out) purged packets */
	u_int32_t		bytes;	/* (out) purged bytes */
} cqrq_purge_sc_t;
/* classq throttle request argument */
typedef struct cqrq_throttle {
	u_int32_t		set;	/* set or get */
	u_int32_t		level;	/* (in/out) throttling level */
} cqrq_throttle_t;
/* classq service class stats request argument */
typedef struct cqrq_stat_sc {
	mbuf_svc_class_t	sc;	/* (in) service class */
	u_int32_t		packets; /* (out) packets enqueued */
	u_int32_t		bytes;	/* (out) bytes enqueued */
} cqrq_stat_sc_t;
#include <net/altq/if_altq.h>
/*
 * A token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.  Modern cards are able to buffer
 * a large amount of packets and dequeue too many packets at a time.  This
 * bursty dequeue behavior makes it impossible to schedule packets by
 * queueing disciplines.  A token-bucket is used to control the burst size
 * in a device independent manner.
 */
struct tb_regulator {
	u_int64_t	tbr_rate_raw;	/* (unscaled) token bucket rate */
	u_int32_t	tbr_percent;	/* token bucket rate in percentage */
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	u_int64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type */
					/*   needed for poll-and-dequeue */
};
/* simple token bucket meter profile */
struct tb_profile {
	u_int64_t	rate;	/* rate in bits per second */
	u_int32_t	percent; /* rate in percentage */
	u_int32_t	depth;	/* depth in bytes */
};
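
/*
 * Example (illustrative sketch, not part of the original header): the
 * token-bucket arithmetic the tb_regulator fields support.  The caller,
 * the timestamp source ("now", in the same clock units as tbr_last) and
 * the fixed-point shift are assumptions made for the example; the real
 * gate lives in ifclassq_tbr_dequeue().
 */
#define EXAMPLE_TBR_SHIFT	32	/* assumed fixed-point scaling */

static inline int
example_tbr_can_dequeue(struct tb_regulator *tbr, u_int64_t now,
    u_int32_t pktlen)
{
	int64_t interval = (int64_t)(now - tbr->tbr_last);

	/* refill: tokens accrue at tbr_rate per tick, capped at the depth */
	if (interval >= tbr->tbr_filluptime) {
		tbr->tbr_token = tbr->tbr_depth;
	} else {
		tbr->tbr_token += interval * tbr->tbr_rate;
		if (tbr->tbr_token > tbr->tbr_depth)
			tbr->tbr_token = tbr->tbr_depth;
	}
	tbr->tbr_last = now;

	/* spend: transmission is allowed only while the bucket is positive */
	if (tbr->tbr_token <= 0)
		return (0);
	tbr->tbr_token -= (int64_t)pktlen << EXAMPLE_TBR_SHIFT;
	return (1);
}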
typedef int (*ifclassq_enq_func)(struct ifclassq *, struct mbuf *);
typedef struct mbuf *(*ifclassq_deq_func)(struct ifclassq *, enum cqdq_op);
typedef struct mbuf *(*ifclassq_deq_sc_func)(struct ifclassq *,
    mbuf_svc_class_t, enum cqdq_op);
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);
/*
 * Structure defining a queue for a network interface.
 */
struct ifclassq {
	decl_lck_mtx_data(, ifcq_lock);

	struct ifnet	*ifcq_ifp;	/* back pointer to interface */
	u_int32_t	ifcq_len;	/* packet count */
	u_int32_t	ifcq_maxlen;
	struct pktcntr	ifcq_xmitcnt;
	struct pktcntr	ifcq_dropcnt;

	u_int32_t	ifcq_type;	/* scheduler type */
	u_int32_t	ifcq_flags;	/* flags */
	u_int32_t	ifcq_sflags;	/* scheduler flags */
	u_int32_t	ifcq_target_qdelay; /* target queue delay */
	u_int32_t	ifcq_bytes;	/* bytes count */
	void		*ifcq_disc;	/* for scheduler-specific use */
	/*
	 * ifcq_disc_slots[] represents the leaf classes configured for the
	 * corresponding discipline/scheduler, ordered by their corresponding
	 * service class index.  Each slot holds the queue ID used to identify
	 * the class instance, as well as the class instance pointer itself.
	 * The latter is used during enqueue and dequeue in order to avoid the
	 * costs associated with looking up the class pointer based on the
	 * queue ID.  The queue ID is used when querying the statistics from
	 * user space.
	 *
	 * Avoiding the use of queue ID during enqueue and dequeue is made
	 * possible by virtue of knowing the particular mbuf service class
	 * associated with the packets.  The service class index of the
	 * packet is used as the index to ifcq_disc_slots[].
	 *
	 * ifcq_disc_slots[] therefore also acts as a lookup table which
	 * provides for the mapping between MBUF_SC values and the actual
	 * scheduler classes; see the example sketch following this
	 * structure.
	 */
	struct ifclassq_disc_slot {
		u_int32_t	qid;	/* queue ID, for stats queries */
		void		*cl;	/* class instance pointer */
	} ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */
	ifclassq_enq_func	ifcq_enqueue;
	ifclassq_deq_func	ifcq_dequeue;
	ifclassq_deq_sc_func	ifcq_dequeue_sc;
	ifclassq_req_func	ifcq_request;

	/* token bucket regulator */
	struct tb_regulator	ifcq_tbr;	/* TBR */

	u_int32_t	ifcq_drain;
	struct ifaltq	ifcq_altq;
};
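
/*
 * Example (illustrative sketch, not part of the original header): how a
 * discipline resolves its class instance straight from a packet's service
 * class, as described in the ifcq_disc_slots[] comment above.  MBUF_SCIDX()
 * is assumed to map an MBUF_SC_* value to its service class index in the
 * range [0, IFCQ_SC_MAX).
 */
static inline void *
example_class_for_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
{
	/* the service class index doubles as the slot index */
	return (ifq->ifcq_disc_slots[MBUF_SCIDX(sc)].cl);
}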
#define IFCQ_ALTQ(_ifcq)		(&(_ifcq)->ifcq_altq)
#define IFCQ_IS_DRAINING(_ifcq)		((_ifcq)->ifcq_drain > 0)
/* ifclassq flags */
#define IFCQF_READY	 0x01		/* ifclassq supports discipline */
#define IFCQF_ENABLED	 0x02		/* ifclassq is in use */
#define IFCQF_TBR	 0x04		/* Token Bucket Regulator is in use */

#define IFCQ_IS_READY(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_READY)
#define IFCQ_IS_ENABLED(_ifcq)		((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define IFCQ_TBR_IS_ENABLED(_ifcq)	((_ifcq)->ifcq_flags & IFCQF_TBR)

/* classq enqueue return value */
#define CLASSQEQ_DROPPED	(-1)	/* packet dropped (freed) */
#define CLASSQEQ_SUCCESS	0	/* success, packet enqueued */
#define CLASSQEQ_SUCCESS_FC	1	/* packet enqueued; */
					/*   give flow control feedback */
#define CLASSQEQ_DROPPED_FC	2	/* packet dropped; */
					/*   give flow control feedback */
#define CLASSQEQ_DROPPED_SP	3	/* packet dropped due to suspension; */
					/*   give flow control feedback */
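
/*
 * Example (illustrative sketch, not part of the original header): folding
 * an IFCQ_ENQUEUE() return value into an errno plus a flow-control
 * advisory for the caller; the out-parameter is hypothetical.
 */
static inline int
example_handle_enqueue_rc(int rc, boolean_t *fc_advisory)
{
	*fc_advisory = (rc == CLASSQEQ_SUCCESS_FC ||
	    rc == CLASSQEQ_DROPPED_FC || rc == CLASSQEQ_DROPPED_SP);

	if (rc == CLASSQEQ_DROPPED || rc == CLASSQEQ_DROPPED_FC ||
	    rc == CLASSQEQ_DROPPED_SP)
		return (ENOBUFS);	/* the classq already freed the mbuf */
	return (0);
}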
/* interface event argument for CLASSQRQ_EVENT */
typedef enum cqev {
	CLASSQ_EV_LINK_BANDWIDTH = 1,	/* link bandwidth has changed */
	CLASSQ_EV_LINK_LATENCY = 2,	/* link latency has changed */
	CLASSQ_EV_LINK_MTU = 3,		/* link MTU has changed */
	CLASSQ_EV_LINK_UP = 4,		/* link is now up */
	CLASSQ_EV_LINK_DOWN = 5,	/* link is now down */
} cqev_t;
#endif /* BSD_KERNEL_PRIVATE */
#include <net/pktsched/pktsched_priq.h>
#include <net/pktsched/pktsched_fairq.h>
#include <net/pktsched/pktsched_tcq.h>
#include <net/pktsched/pktsched_cbq.h>
#include <net/pktsched/pktsched_hfsc.h>
#include <net/pktsched/pktsched_qfq.h>
struct if_ifclassq_stats {
	u_int32_t	ifqs_len;
	u_int32_t	ifqs_maxlen;
	struct pktcntr	ifqs_xmitcnt;
	struct pktcntr	ifqs_dropcnt;
	u_int32_t	ifqs_scheduler;
	union {
		struct priq_classstats	ifqs_priq_stats;
		struct fairq_classstats	ifqs_fairq_stats;
		struct tcq_classstats	ifqs_tcq_stats;
		struct cbq_classstats	ifqs_cbq_stats;
		struct hfsc_classstats	ifqs_hfsc_stats;
		struct qfq_classstats	ifqs_qfq_stats;
	};
} __attribute__((aligned(8)));
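
/*
 * Example (illustrative sketch, not part of the original header): a
 * consumer picks the valid union member based on ifqs_scheduler.  The
 * PKTSCHEDT_* scheduler type constants are assumed to come from the
 * pktsched headers included above.
 */
static inline const char *
example_scheduler_name(const struct if_ifclassq_stats *ifqs)
{
	switch (ifqs->ifqs_scheduler) {
	case PKTSCHEDT_PRIQ:	return ("priq");  /* ifqs_priq_stats */
	case PKTSCHEDT_FAIRQ:	return ("fairq"); /* ifqs_fairq_stats */
	case PKTSCHEDT_TCQ:	return ("tcq");	  /* ifqs_tcq_stats */
	case PKTSCHEDT_CBQ:	return ("cbq");	  /* ifqs_cbq_stats */
	case PKTSCHEDT_HFSC:	return ("hfsc");  /* ifqs_hfsc_stats */
	case PKTSCHEDT_QFQ:	return ("qfq");	  /* ifqs_qfq_stats */
	default:		return ("none");
	}
}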
#ifdef BSD_KERNEL_PRIVATE
#define IFCQ_LOCK_ASSERT_HELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)

#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq)					\
	lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)

#define IFCQ_LOCK(_ifcq)						\
	lck_mtx_lock(&(_ifcq)->ifcq_lock)

#define IFCQ_LOCK_SPIN(_ifcq)						\
	lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)

#define IFCQ_CONVERT_LOCK(_ifcq) do {					\
	IFCQ_LOCK_ASSERT_HELD(_ifcq);					\
	lck_mtx_convert_spin(&(_ifcq)->ifcq_lock);			\
} while (0)

#define IFCQ_UNLOCK(_ifcq)						\
	lck_mtx_unlock(&(_ifcq)->ifcq_lock)
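
/*
 * Example (illustrative sketch, not part of the original header): the
 * spin-then-convert pattern these macros enable.  A hot path takes the
 * queue lock in spin mode and converts it to a full mutex before any
 * operation that may block.
 */
static inline void
example_lock_pattern(struct ifclassq *ifq)
{
	IFCQ_LOCK_SPIN(ifq);
	/* short, non-blocking bookkeeping is safe here */
	IFCQ_CONVERT_LOCK(ifq);
	/* work that may block (e.g. freeing packets) goes here */
	IFCQ_UNLOCK(ifq);
}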
/*
 * For ifclassq operations
 */
#define IFCQ_ENQUEUE(_ifq, _m, _err) do {				\
	(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _m);			\
} while (0)

#define IFCQ_DEQUEUE(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_REMOVE);		\
} while (0)

#define IFCQ_DEQUEUE_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_REMOVE);	\
} while (0)

#define IFCQ_TBR_DEQUEUE(_ifcq, _m) do {				\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_REMOVE);		\
} while (0)

#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _m) do {			\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_REMOVE, _sc);	\
} while (0)

#define IFCQ_POLL(_ifq, _m) do {					\
	(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_POLL);		\
} while (0)

#define IFCQ_POLL_SC(_ifq, _sc, _m) do {				\
	(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_POLL);	\
} while (0)

#define IFCQ_TBR_POLL(_ifcq, _m) do {					\
	(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_POLL);		\
} while (0)

#define IFCQ_TBR_POLL_SC(_ifcq, _sc, _m) do {				\
	(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_POLL, _sc);	\
} while (0)
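
/*
 * Example (illustrative sketch, not part of the original header): the
 * poll-and-dequeue pattern the CLASSQDQ_POLL op exists for.  A driver
 * peeks at the head packet, verifies it has transmit resources, and only
 * then commits the dequeue; the example_tx_* hooks are hypothetical.
 */
extern boolean_t example_tx_ready(struct ifnet *, struct mbuf *);
extern void example_tx(struct ifnet *, struct mbuf *);

static inline void
example_drain(struct ifnet *ifp, struct ifclassq *ifq)
{
	struct mbuf *m;

	IFCQ_LOCK(ifq);
	for (;;) {
		IFCQ_POLL(ifq, m);	/* peek; packet stays queued */
		if (m == NULL || !example_tx_ready(ifp, m))
			break;
		IFCQ_DEQUEUE(ifq, m);	/* commit; returns the same packet */
		example_tx(ifp, m);
	}
	IFCQ_UNLOCK(ifq);
}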
#define IFCQ_PURGE(_ifq) do {						\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL);	\
} while (0)

#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do {		\
	cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 };			\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \
	(_packets) = _req.packets;					\
	(_bytes) = _req.bytes;						\
} while (0)
#define IFCQ_UPDATE(_ifq, _ev) do {					\
	(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT,		\
	    (void *)(_ev));						\
} while (0)
#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 1, _level };				\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
} while (0)

#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do {			\
	cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF };		\
	(_err) = (*(_ifq)->ifcq_request)				\
	    (_ifq, CLASSQRQ_THROTTLE, &_req);				\
	(_level) = _req.level;						\
} while (0)
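
/*
 * Example (illustrative sketch, not part of the original header): setting
 * a throttle level and reading it back through the scheduler's request
 * hook.  The IFNET_THROTTLE_* levels are assumed from <net/if_var.h>.
 */
static inline int
example_set_throttle(struct ifclassq *ifq, u_int32_t level,
    u_int32_t *effective)
{
	int err;

	IFCQ_LOCK(ifq);
	IFCQ_SET_THROTTLE(ifq, level, err);
	if (err == 0)
		IFCQ_GET_THROTTLE(ifq, *effective, err);
	IFCQ_UNLOCK(ifq);
	return (err);
}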
#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do {		\
	cqrq_stat_sc_t _req = { _sc, 0, 0 };				\
	(_err) = (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \
	if ((_packets) != NULL)						\
		(*(_packets)) = _req.packets;				\
	if ((_bytes) != NULL)						\
		(*(_bytes)) = _req.bytes;				\
} while (0)
#define IFCQ_LEN(_ifcq)		((_ifcq)->ifcq_len)
#define IFCQ_QFULL(_ifcq)	(IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define IFCQ_IS_EMPTY(_ifcq)	(IFCQ_LEN(_ifcq) == 0)
#define IFCQ_INC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)++)
#define IFCQ_DEC_LEN(_ifcq)	(IFCQ_LEN(_ifcq)--)
#define IFCQ_MAXLEN(_ifcq)	((_ifcq)->ifcq_maxlen)
#define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len))
#define IFCQ_TARGET_QDELAY(_ifcq)	((_ifcq)->ifcq_target_qdelay)
#define IFCQ_BYTES(_ifcq)	((_ifcq)->ifcq_bytes)
#define IFCQ_INC_BYTES(_ifcq, _len) (IFCQ_BYTES(_ifcq) += (_len))
#define IFCQ_DEC_BYTES(_ifcq, _len) (IFCQ_BYTES(_ifcq) -= (_len))
#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len);		\
} while (0)

#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do {				\
	PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len);		\
} while (0)
extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t *, u_int32_t *);
extern errno_t ifclassq_enqueue(struct ifclassq *, struct mbuf *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, struct mbuf **,
    struct mbuf **, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern struct mbuf *ifclassq_poll(struct ifclassq *);
extern struct mbuf *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t);
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
    ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
    ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
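
/*
 * Example (illustrative sketch, not part of the original header): a
 * scheduler wires its four hooks into an ifclassq via ifclassq_attach().
 * The example_* hooks are hypothetical, and PKTSCHEDT_PRIQ is assumed
 * from the pktsched headers.
 */
extern int example_enq(struct ifclassq *, struct mbuf *);
extern struct mbuf *example_deq(struct ifclassq *, enum cqdq_op);
extern struct mbuf *example_deq_sc(struct ifclassq *, mbuf_svc_class_t,
    enum cqdq_op);
extern int example_req(struct ifclassq *, enum cqrq, void *);

static inline int
example_scheduler_attach(struct ifclassq *ifq, void *disc)
{
	/* stores the hooks into ifcq_enqueue/ifcq_dequeue/ifcq_request */
	return (ifclassq_attach(ifq, PKTSCHEDT_PRIQ, disc,
	    example_enq, example_deq, example_deq_sc, example_req));
}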
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
    void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *,
    boolean_t);
extern struct mbuf *ifclassq_tbr_dequeue(struct ifclassq *, int);
extern struct mbuf *ifclassq_tbr_dequeue_sc(struct ifclassq *, int,
    mbuf_svc_class_t);
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _NET_CLASSQ_IF_CLASSQ_H_ */