/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
 */
#define DUMMYNET_DEBUG
/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator
 * used in conjunction with the ipfw package.
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * NOTA BENE: critical sections are protected by the "dummynet lock".
 *
 * Most important Changes:
 *
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: large rewrite, use heaps to handle very many pipes.
 * 980513: initial release
 *
 * include files marked with XXX are probably not needed
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>			/* XXX */
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/sysctl.h>
//#include <sys/mcache.h>
#include <net/if.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/ip_var.h>
/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
 */
static dn_key curr_time = 0 ; /* current simulation time */
/* this is for the timer that fires to call dummynet() - we only enable the
   timer when there are packets to process, otherwise it's disabled */
static int timer_enabled = 0;

static int dn_hash_size = 64 ;	/* default hash size */

/* statistics on number of queue searches and search steps */
static int searches, search_steps ;
static int pipe_expire = 1 ;	/* expire queue if empty */
static int dn_max_ratio = 16 ;	/* max queues/buckets ratio */

static int red_lookup_depth = 256;	/* RED - default lookup table depth */
static int red_avg_pkt_size = 512;	/* RED - default medium packet size */
static int red_max_pkt_size = 1500;	/* RED - default max packet size */

static int serialize = 0;
/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *
 * wfq_ready_heap contains the pipes associated with WF2Q flows
 *
 * extract_heap contains pipes associated with delay lines.
 */
static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;

static int heap_init(struct dn_heap *h, int size) ;
static int heap_insert (struct dn_heap *h, dn_key key1, void *p);
static void heap_extract(struct dn_heap *h, void *obj);

static void transmit_event(struct dn_pipe *pipe, struct mbuf **head,
    struct mbuf **tail);
static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
    struct mbuf **tail);
static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
    struct mbuf **tail);
/*
 * Packets are retrieved from queues in Dummynet in chains instead of
 * packet-by-packet. The entire list of packets is first dequeued and
 * sent out by the following function.
 */
static void dummynet_send(struct mbuf *m);
#define	HASHSIZE	16
#define	HASH(num)	((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
static struct dn_pipe_head	pipehash[HASHSIZE];	/* all pipes */
static struct dn_flow_set_head	flowsethash[HASHSIZE];	/* all flowsets */
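/*
 * Illustrative note (added example, not in the original file): HASH() folds
 * the pipe/flowset number into the 16 buckets by xor-ing three nibble-shifted
 * copies of the value, e.g.
 *	HASH(0x35) == ((0x35 >> 8) ^ (0x35 >> 4) ^ 0x35) & 0x0f
 *	           == (0x0 ^ 0x3 ^ 0x35) & 0x0f == 0x36 & 0x0f == 6
 * so identifiers that differ only in their higher nibbles still spread
 * across different slots.
 */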
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
	CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
	CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
	CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
	CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
	CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
	CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
	CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
	CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
	CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
	"Max ratio between dynamic queues and buckets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
	CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
	CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
	CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
#ifdef DUMMYNET_DEBUG
int	dummynet_debug = 0;

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
	&dummynet_debug, 0, "control debugging printfs");
#define	DPRINTF(X)	if (dummynet_debug) printf X
#else
#define	DPRINTF(X)
#endif
/* contrary to the comment above random(), it does not actually
 * return a value [0, 2^31 - 1], which breaks plr amongst other
 * things. Masking it should work even if the behavior of
 * the function is fixed.
 */
#define MY_RANDOM (random() & 0x7FFFFFFF)
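/*
 * Illustrative note (added, not in the original file): the masked value is
 * compared against scaled probabilities elsewhere in this file, e.g. the
 * random packet-loss check in dummynet_io(),
 *	if (fs->plr && (MY_RANDOM < fs->plr))
 *		goto dropit;
 * where plr is assumed to be a loss probability scaled to 0..0x7fffffff by
 * the userland configuration tool, so a plr of 0x40000000 drops roughly
 * half of the packets.
 */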
static lck_grp_t	*dn_mutex_grp;
static lck_grp_attr_t	*dn_mutex_grp_attr;
static lck_attr_t	*dn_mutex_attr;
static lck_mtx_t	*dn_mutex;
static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);

static void dummynet(void *);
static void dummynet_flush(void);
void dummynet_drain(void);
static ip_dn_io_t dummynet_io;
static void dn_rule_delete(void *);

int if_tx_rdy(struct ifnet *ifp);
static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
static char* dn_copy_set_64(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p );

static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
static void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
static char* dn_copy_set_32(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p );
/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)		( ( (x) - 1 ) / 2 )
#define HEAP_LEFT(x)		( 2*(x) + 1 )
#define HEAP_IS_LEFT(x)		( (x) & 1 )
#define HEAP_RIGHT(x)		( 2*(x) + 2 )
#define	HEAP_SWAP(a, b, buffer)	{ buffer = a ; a = b ; b = buffer ; }
#define HEAP_INCREMENT		15
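/*
 * Illustrative arithmetic (added example, not in the original file): with
 * HEAP_INCREMENT == 15, heap_init() rounds a request up to the next multiple
 * of 16, e.g.
 *	new_size = 1  -> (1 + 15) & ~15 == 16
 *	new_size = 17 -> (17 + 15) & ~15 == 32
 * and in the array layout the node at index 4 has father (4-1)/2 == 1 and
 * children 2*4+1 == 9 and 2*4+2 == 10.
 */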
int
cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
{
    struct dn_pipe_32 user_pipe_32;
    int error = 0;

    error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
    if ( !error ){
	p->pipe_nr = user_pipe_32.pipe_nr;
	p->bandwidth = user_pipe_32.bandwidth;
	p->delay = user_pipe_32.delay;
	p->V = user_pipe_32.V;
	p->sum = user_pipe_32.sum;
	p->numbytes = user_pipe_32.numbytes;
	p->sched_time = user_pipe_32.sched_time;
	bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
	p->ready = user_pipe_32.ready;

	p->fs.fs_nr = user_pipe_32.fs.fs_nr;
	p->fs.flags_fs = user_pipe_32.fs.flags_fs;
	p->fs.parent_nr = user_pipe_32.fs.parent_nr;
	p->fs.weight = user_pipe_32.fs.weight;
	p->fs.qsize = user_pipe_32.fs.qsize;
	p->fs.plr = user_pipe_32.fs.plr;
	p->fs.flow_mask = user_pipe_32.fs.flow_mask;
	p->fs.rq_size = user_pipe_32.fs.rq_size;
	p->fs.rq_elements = user_pipe_32.fs.rq_elements;
	p->fs.last_expired = user_pipe_32.fs.last_expired;
	p->fs.backlogged = user_pipe_32.fs.backlogged;
	p->fs.w_q = user_pipe_32.fs.w_q;
	p->fs.max_th = user_pipe_32.fs.max_th;
	p->fs.min_th = user_pipe_32.fs.min_th;
	p->fs.max_p = user_pipe_32.fs.max_p;
	p->fs.c_1 = user_pipe_32.fs.c_1;
	p->fs.c_2 = user_pipe_32.fs.c_2;
	p->fs.c_3 = user_pipe_32.fs.c_3;
	p->fs.c_4 = user_pipe_32.fs.c_4;
	p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
	p->fs.lookup_step = user_pipe_32.fs.lookup_step;
	p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
	p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
	p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
    }
    return error;
}
int
cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
{
    struct dn_pipe_64 user_pipe_64;
    int error = 0;

    error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
    if ( !error ){
	p->pipe_nr = user_pipe_64.pipe_nr;
	p->bandwidth = user_pipe_64.bandwidth;
	p->delay = user_pipe_64.delay;
	p->V = user_pipe_64.V;
	p->sum = user_pipe_64.sum;
	p->numbytes = user_pipe_64.numbytes;
	p->sched_time = user_pipe_64.sched_time;
	bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
	p->ready = user_pipe_64.ready;

	p->fs.fs_nr = user_pipe_64.fs.fs_nr;
	p->fs.flags_fs = user_pipe_64.fs.flags_fs;
	p->fs.parent_nr = user_pipe_64.fs.parent_nr;
	p->fs.weight = user_pipe_64.fs.weight;
	p->fs.qsize = user_pipe_64.fs.qsize;
	p->fs.plr = user_pipe_64.fs.plr;
	p->fs.flow_mask = user_pipe_64.fs.flow_mask;
	p->fs.rq_size = user_pipe_64.fs.rq_size;
	p->fs.rq_elements = user_pipe_64.fs.rq_elements;
	p->fs.last_expired = user_pipe_64.fs.last_expired;
	p->fs.backlogged = user_pipe_64.fs.backlogged;
	p->fs.w_q = user_pipe_64.fs.w_q;
	p->fs.max_th = user_pipe_64.fs.max_th;
	p->fs.min_th = user_pipe_64.fs.min_th;
	p->fs.max_p = user_pipe_64.fs.max_p;
	p->fs.c_1 = user_pipe_64.fs.c_1;
	p->fs.c_2 = user_pipe_64.fs.c_2;
	p->fs.c_3 = user_pipe_64.fs.c_3;
	p->fs.c_4 = user_pipe_64.fs.c_4;
	p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
	p->fs.lookup_step = user_pipe_64.fs.lookup_step;
	p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
	p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
	p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
    }
    return error;
}
static void
cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
{
    fs_bp->fs_nr = set->fs_nr;
    fs_bp->flags_fs = set->flags_fs;
    fs_bp->parent_nr = set->parent_nr;
    fs_bp->weight = set->weight;
    fs_bp->qsize = set->qsize;
    fs_bp->plr = set->plr;
    fs_bp->flow_mask = set->flow_mask;
    fs_bp->rq_size = set->rq_size;
    fs_bp->rq_elements = set->rq_elements;
    fs_bp->last_expired = set->last_expired;
    fs_bp->backlogged = set->backlogged;
    fs_bp->w_q = set->w_q;
    fs_bp->max_th = set->max_th;
    fs_bp->min_th = set->min_th;
    fs_bp->max_p = set->max_p;
    fs_bp->c_1 = set->c_1;
    fs_bp->c_2 = set->c_2;
    fs_bp->c_3 = set->c_3;
    fs_bp->c_4 = set->c_4;
    fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup) ;
    fs_bp->lookup_depth = set->lookup_depth;
    fs_bp->lookup_step = set->lookup_step;
    fs_bp->lookup_weight = set->lookup_weight;
    fs_bp->avg_pkt_size = set->avg_pkt_size;
    fs_bp->max_pkt_size = set->max_pkt_size;
}
static void
cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
{
    fs_bp->fs_nr = set->fs_nr;
    fs_bp->flags_fs = set->flags_fs;
    fs_bp->parent_nr = set->parent_nr;
    fs_bp->weight = set->weight;
    fs_bp->qsize = set->qsize;
    fs_bp->plr = set->plr;
    fs_bp->flow_mask = set->flow_mask;
    fs_bp->rq_size = set->rq_size;
    fs_bp->rq_elements = set->rq_elements;
    fs_bp->last_expired = set->last_expired;
    fs_bp->backlogged = set->backlogged;
    fs_bp->w_q = set->w_q;
    fs_bp->max_th = set->max_th;
    fs_bp->min_th = set->min_th;
    fs_bp->max_p = set->max_p;
    fs_bp->c_1 = set->c_1;
    fs_bp->c_2 = set->c_2;
    fs_bp->c_3 = set->c_3;
    fs_bp->c_4 = set->c_4;
    fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup) ;
    fs_bp->lookup_depth = set->lookup_depth;
    fs_bp->lookup_step = set->lookup_step;
    fs_bp->lookup_weight = set->lookup_weight;
    fs_bp->avg_pkt_size = set->avg_pkt_size;
    fs_bp->max_pkt_size = set->max_pkt_size;
}
static
void
cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
{
    qp->id = q->id;
    qp->len = q->len;
    qp->len_bytes = q->len_bytes;
    qp->numbytes = q->numbytes;
    qp->tot_pkts = q->tot_pkts;
    qp->tot_bytes = q->tot_bytes;
    qp->drops = q->drops;
    qp->hash_slot = q->hash_slot;
    qp->avg = q->avg;
    qp->count = q->count;
    qp->random = q->random;
    qp->q_time = q->q_time;
    qp->heap_pos = q->heap_pos;
    qp->sched_time = q->sched_time;
    qp->S = q->S;
    qp->F = q->F;
}
static
void
cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
{
    qp->id = q->id;
    qp->len = q->len;
    qp->len_bytes = q->len_bytes;
    qp->numbytes = q->numbytes;
    qp->tot_pkts = q->tot_pkts;
    qp->tot_bytes = q->tot_bytes;
    qp->drops = q->drops;
    qp->hash_slot = q->hash_slot;
    qp->avg = q->avg;
    qp->count = q->count;
    qp->random = q->random;
    qp->q_time = q->q_time;
    qp->heap_pos = q->heap_pos;
    qp->sched_time = q->sched_time;
    qp->S = q->S;
    qp->F = q->F;
}
static
char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
{
    char	*bp;

    pipe_bp->pipe_nr = p->pipe_nr;
    pipe_bp->bandwidth = p->bandwidth;
    pipe_bp->delay = p->delay;
    bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
    pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p);
    bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
    pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->not_eligible_heap.p);
    bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
    pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->idle_heap.p);
    pipe_bp->V = p->V;
    pipe_bp->sum = p->sum;
    pipe_bp->numbytes = p->numbytes;
    pipe_bp->sched_time = p->sched_time;
    bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
    pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
    pipe_bp->ready = p->ready;

    cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));

    pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
    /*
     * XXX the following is a hack based on ->next being the
     * first field in dn_pipe and dn_flow_set. The correct
     * solution would be to move the dn_flow_set to the beginning
     * of struct dn_pipe.
     */
    pipe_bp->next = CAST_DOWN_EXPLICIT( user32_addr_t, DN_IS_PIPE );
    /* clean pointers */
    pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0 ;
    pipe_bp->fs.next = (user32_addr_t)0 ;
    pipe_bp->fs.pipe = (user32_addr_t)0 ;
    pipe_bp->fs.rq = (user32_addr_t)0 ;
    bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32);
    return ( dn_copy_set_32( &(p->fs), bp) );
}
static
char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
{
    char	*bp;

    pipe_bp->pipe_nr = p->pipe_nr;
    pipe_bp->bandwidth = p->bandwidth;
    pipe_bp->delay = p->delay;
    bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
    pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p);
    bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
    pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->not_eligible_heap.p);
    bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
    pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->idle_heap.p);
    pipe_bp->V = p->V;
    pipe_bp->sum = p->sum;
    pipe_bp->numbytes = p->numbytes;
    pipe_bp->sched_time = p->sched_time;
    bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
    pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
    pipe_bp->ready = p->ready;

    cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));

    pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
    /*
     * XXX the following is a hack based on ->next being the
     * first field in dn_pipe and dn_flow_set. The correct
     * solution would be to move the dn_flow_set to the beginning
     * of struct dn_pipe.
     */
    pipe_bp->next = CAST_DOWN( user64_addr_t, DN_IS_PIPE );
    /* clean pointers */
    pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL ;
    pipe_bp->fs.next = USER_ADDR_NULL ;
    pipe_bp->fs.pipe = USER_ADDR_NULL ;
    pipe_bp->fs.rq = USER_ADDR_NULL ;
    bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64);
    return ( dn_copy_set_64( &(p->fs), bp) );
}
static int
heap_init(struct dn_heap *h, int new_size)
{
    struct dn_heap_entry *p;

    if (h->size >= new_size ) {
	printf("dummynet: heap_init, Bogus call, have %d want %d\n",
		h->size, new_size);
	return 0 ;
    }
    new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
    p = _MALLOC(new_size * sizeof(*p), M_DUMMYNET, M_DONTWAIT );
    if (p == NULL) {
	printf("dummynet: heap_init, resize %d failed\n", new_size );
	return 1 ; /* error */
    }
    if (h->size > 0) {
	bcopy(h->p, p, h->size * sizeof(*p) );
	FREE(h->p, M_DUMMYNET);
    }
    h->p = p ;
    h->size = new_size ;
    return 0 ;
}
/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
    if (heap->offset > 0) \
	*((int *)((char *)(heap->p[node].object) + heap->offset)) = node ;
/*
 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) \
    if (heap->offset > 0) \
	*((int *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
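/*
 * Illustrative sketch (added, not in the original sources; example_obj is a
 * hypothetical type): a structure that wants O(1) extraction from the middle
 * of a heap embeds an int field and registers its byte offset, e.g.
 *
 *	struct example_obj { dn_key key; int heap_pos; };
 *	struct dn_heap eh;
 *	eh.offset = offsetof(struct example_obj, heap_pos);
 *
 * SET_OFFSET() then keeps obj->heap_pos equal to the node index, so
 * heap_extract(&eh, obj) can locate the element without a linear scan.
 * dn_flow_queue uses exactly this scheme for the idle_heap: config_pipe()
 * sets idle_heap.offset to offsetof(struct dn_flow_queue, heap_pos).
 */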
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
    int son = h->elements ;

    if (p == NULL)	/* data already there, set starting point */
	son = key1 ;
    else {		/* insert new element at the end, possibly resize */
	son = h->elements ;
	if (son == h->size) /* need resize... */
	    if (heap_init(h, h->elements+1) )
		return 1 ; /* failure... */
	h->p[son].object = p ;
	h->p[son].key = key1 ;
	h->elements++ ;
    }
    while (son > 0) {				/* bubble up */
	int father = HEAP_FATHER(son) ;
	struct dn_heap_entry tmp  ;

	if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
	    break ; /* found right position */
	/* son smaller than father, swap and repeat */
	HEAP_SWAP(h->p[son], h->p[father], tmp) ;
	SET_OFFSET(h, son);
	son = father ;
    }
    SET_OFFSET(h, son);
    return 0 ;
}
/*
 * remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
    int child, father, maxelt = h->elements - 1 ;

    if (maxelt < 0) {
	printf("dummynet: warning, extract from empty heap 0x%p\n", h);
	return ;
    }
    father = 0 ; /* default: move up smallest child */
    if (obj != NULL) { /* extract specific element, index is at offset */
	if (h->offset <= 0)
	    panic("dummynet: heap_extract from middle not supported on this heap!!!\n");
	father = *((int *)((char *)obj + h->offset)) ;
	if (father < 0 || father >= h->elements) {
	    printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
		father, h->elements);
	    panic("dummynet: heap_extract");
	}
    }
    RESET_OFFSET(h, father);
    child = HEAP_LEFT(father) ;		/* left child */
    while (child <= maxelt) {		/* valid entry */
	if (child != maxelt && DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
	    child = child+1 ;		/* take right child, otherwise left */
	h->p[father] = h->p[child] ;
	SET_OFFSET(h, father);
	father = child ;
	child = HEAP_LEFT(child) ;	/* left child for next loop */
    }
    h->elements-- ;
    if (father != maxelt) {
	/*
	 * Fill hole with last entry and bubble up, reusing the insert code
	 */
	h->p[father] = h->p[maxelt] ;
	heap_insert(h, father, NULL); /* this one cannot fail */
    }
}
/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
    int i ;

    for (i = 0 ; i < h->elements ; i++ )
	heap_insert(h, i , NULL) ;
}

/*
 * cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
    if (h->size > 0 )
	FREE(h->p, M_DUMMYNET);
    bzero(h, sizeof(*h));
}

/*
 * --- end of heap management functions ---
 */
/*
 * Return the mbuf tag holding the dummynet state. As an optimization
 * this is assumed to be the first tag on the list. If this turns out
 * wrong we'll need to search the list.
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
    struct m_tag *mtag = m_tag_first(m);
/*  KASSERT(mtag != NULL &&
	    mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
	    mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
*/
    return (struct dn_pkt_tag *)(mtag+1);
}
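/*
 * Illustrative note (added, not in the original file): m_tag allocations
 * place the payload immediately after the struct m_tag header, so with
 *	struct m_tag *mtag = m_tag_first(m);
 * the expression (struct dn_pkt_tag *)(mtag+1) points at the dummynet
 * state that dummynet_io() attached via m_tag_create()/m_tag_prepend().
 */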
/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
{
    struct mbuf *m ;
    struct dn_pkt_tag *pkt = NULL;
    u_int64_t schedule_time;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
    ASSERT(serialize >= 0);
    if (serialize == 0) {
	while ((m = pipe->head) != NULL) {
	    pkt = dn_tag_get(m);
	    if (!DN_KEY_LEQ(pkt->output_time, curr_time))
		break;

	    pipe->head = m->m_nextpkt;
	    if (*tail != NULL)
		(*tail)->m_nextpkt = m;
	    else
		*head = m;
	    *tail = m;
	}

	if (*tail != NULL)
	    (*tail)->m_nextpkt = NULL;
    }

    schedule_time = DN_KEY_LEQ(pkt->output_time, curr_time) ?
	curr_time + 1 : pkt->output_time;

    /* if there are leftover packets, put the pipe into the heap for next ready event */
    if ((m = pipe->head) != NULL) {
	pkt = dn_tag_get(m);
	/* XXX should check errors on heap_insert, by draining the
	 * whole pipe p and hoping in the future we are more successful
	 */
	heap_insert(&extract_heap, schedule_time, pipe);
    }
}
/*
 * the following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */

/* hz is 100, which gives a granularity of 10ms in the old timer.
 * The timer has been changed to fire every 1ms, so the use of
 * hz has been modified here. All instances of hz have been left
 * in place but adjusted by a factor of 10 so that hz is functionally
 * equal to 1000.
 */
#define SET_TICKS(_m, q, p) \
    ((_m)->m_pkthdr.len*8*(hz*10) - (q)->numbytes + p->bandwidth - 1 ) / \
	    p->bandwidth ;
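/*
 * Illustrative arithmetic (added example, not in the original file): with
 * hz == 100 (so hz*10 == 1000 ticks/sec), a 1500-byte packet on a pipe with
 * p->bandwidth of 1,000,000 bits/sec and no accumulated credit
 * (q->numbytes == 0) needs
 *	(1500*8*1000 + 1000000 - 1) / 1000000 == 12 ticks
 * i.e. 12 ms, which matches 12,000 bits at 1 Mbit/s.
 */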
/*
 * extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
	struct dn_pipe *p, int len)
{
    struct dn_pkt_tag *dt = dn_tag_get(pkt);

    q->head = pkt->m_nextpkt ;
    q->len-- ;
    q->len_bytes -= len ;

    dt->output_time = curr_time + p->delay ;

    if (p->head == NULL)
	p->head = pkt;
    else
	p->tail->m_nextpkt = pkt;
    p->tail = pkt;
    p->tail->m_nextpkt = NULL;
}
/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
{
    struct mbuf *pkt;
    struct dn_pipe *p = q->fs->pipe ;
    int p_was_empty ;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    if (p == NULL) {
	printf("dummynet: ready_event pipe is gone\n");
	return ;
    }
    p_was_empty = (p->head == NULL) ;

    /*
     * schedule fixed-rate queues linked to this pipe:
     * Account for the bw accumulated since last scheduling, then
     * drain as many pkts as allowed by q->numbytes and move to
     * the delay line (in p) computing output time.
     * bandwidth==0 (no limit) means we can drain the whole queue,
     * setting len_scaled = 0 does the job.
     */
    q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth;
    while ( (pkt = q->head) != NULL ) {
	int len = pkt->m_pkthdr.len;
	int len_scaled = p->bandwidth ? len*8*(hz*10) : 0 ;
	if (len_scaled > q->numbytes )
	    break ;
	q->numbytes -= len_scaled ;
	move_pkt(pkt, q, p, len);
    }
    /*
     * If we have more packets queued, schedule next ready event
     * (can only occur when bandwidth != 0, otherwise we would have
     * flushed the whole queue in the previous loop).
     * To this purpose we record the current time and compute how many
     * ticks to go for the finish time of the packet.
     */
    if ( (pkt = q->head) != NULL ) { /* this implies bandwidth != 0 */
	dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
	q->sched_time = curr_time ;
	heap_insert(&ready_heap, curr_time + t, (void *)q );
	/* XXX should check errors on heap_insert, and drain the whole
	 * queue on error hoping next time we are luckier.
	 */
    } else { /* RED needs to know when the queue becomes empty */
	q->q_time = curr_time;
	q->numbytes = 0;
    }
    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
	transmit_event(p, head, tail);
}
/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes<0,
 * there is an additional delay.
 */
static void
ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
{
    int p_was_empty = (p->head == NULL) ;
    struct dn_heap *sch = &(p->scheduler_heap);
    struct dn_heap *neh = &(p->not_eligible_heap) ;
    int64_t p_numbytes = p->numbytes;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    if (p->if_name[0] == 0) /* tx clock is simulated */
	p_numbytes += ( curr_time - p->sched_time ) * p->bandwidth;
    else { /* tx clock is for real, the ifq must be empty or this is a NOP */
	if (p->ifp && p->ifp->if_snd.ifq_head != NULL)
	    return ;
	else {
	    DPRINTF(("dummynet: pipe %d ready from %s --\n",
		p->pipe_nr, p->if_name));
	}
    }

    /*
     * While we have backlogged traffic AND credit, we need to do
     * something on the queue.
     */
    while ( p_numbytes >=0 && (sch->elements>0 || neh->elements >0) ) {
	if (sch->elements > 0) { /* have some eligible pkts to send out */
	    struct dn_flow_queue *q = sch->p[0].object ;
	    struct mbuf *pkt = q->head;
	    struct dn_flow_set *fs = q->fs;
	    u_int64_t len = pkt->m_pkthdr.len;
	    int len_scaled = p->bandwidth ? len*8*(hz*10) : 0 ;

	    heap_extract(sch, NULL); /* remove queue from heap */
	    p_numbytes -= len_scaled ;
	    move_pkt(pkt, q, p, len);

	    p->V += (len<<MY_M) / p->sum ;	/* update V */
	    q->S = q->F ;			/* update start time */
	    if (q->len == 0) { /* Flow not backlogged any more */
		fs->backlogged-- ;
		heap_insert(&(p->idle_heap), q->F, q);
	    } else { /* still backlogged */
		/*
		 * update F and position in backlogged queue, then
		 * put flow in not_eligible_heap (we will fix this later).
		 */
		len = (q->head)->m_pkthdr.len;
		q->F += (len<<MY_M)/(u_int64_t) fs->weight ;
		if (DN_KEY_LEQ(q->S, p->V))
		    heap_insert(neh, q->S, q);
		else
		    heap_insert(sch, q->F, q);
	    }
	}
	/*
	 * now compute V = max(V, min(S_i)). Remember that all elements in sch
	 * have by definition S_i <= V so if sch is not empty, V is surely
	 * the max and we must not update it. Conversely, if sch is empty
	 * we only need to look at neh.
	 */
	if (sch->elements == 0 && neh->elements > 0)
	    p->V = MAX64 ( p->V, neh->p[0].key );
	/* move from neh to sch any packets that have become eligible */
	while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V) ) {
	    struct dn_flow_queue *q = neh->p[0].object ;
	    heap_extract(neh, NULL);
	    heap_insert(sch, q->F, q);
	}

	if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
	    p_numbytes = -1 ; /* mark not ready for I/O */
	    break ;
	}
    }
    if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
	    && p->idle_heap.elements > 0) {
	/*
	 * no traffic and no events scheduled. We can get rid of idle-heap.
	 */
	int i ;

	for (i = 0 ; i < p->idle_heap.elements ; i++) {
	    struct dn_flow_queue *q = p->idle_heap.p[i].object ;

	    q->F = 0 ;
	    q->S = q->F + 1 ;
	}
	p->sum = 0 ;
	p->V = 0 ;
	p->idle_heap.elements = 0 ;
    }
    /*
     * If we are getting clocks from dummynet (not a real interface) and
     * If we are under credit, schedule the next ready event.
     * Also fix the delivery time of the last packet.
     */
    if (p->if_name[0]==0 && p_numbytes < 0) { /* this implies bandwidth >0 */
	dn_key t=0 ; /* number of ticks i have to wait */

	if (p->bandwidth > 0)
	    t = ( p->bandwidth -1 - p_numbytes ) / p->bandwidth ;
	dn_tag_get(p->tail)->output_time += t ;
	p->sched_time = curr_time ;
	heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
	/* XXX should check errors on heap_insert, and drain the whole
	 * queue on error hoping next time we are luckier.
	 */
    }

    /* Fit (adjust if necessary) 64bit result into 32bit variable. */
    if (p_numbytes > INT_MAX)
	p->numbytes = INT_MAX;
    else if (p_numbytes < INT_MIN)
	p->numbytes = INT_MIN;
    else
	p->numbytes = p_numbytes;

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
	transmit_event(p, head, tail);
}
/*
 * This is called every 1ms. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(__unused void * unused)
{
    void *p ;	/* generic parameter to handler */
    struct dn_heap *h ;
    struct dn_heap *heaps[3];
    struct mbuf *head = NULL, *tail = NULL;
    int i;
    struct dn_pipe *pe ;
    struct timespec ts;
    struct timeval tv;

    heaps[0] = &ready_heap ;		/* fixed-rate queues */
    heaps[1] = &wfq_ready_heap ;	/* wfq queues */
    heaps[2] = &extract_heap ;		/* delay line */

    lck_mtx_lock(dn_mutex);

    /* make all time measurements in milliseconds (ms) -
     * here we convert secs and usecs to msecs (just divide the
     * usecs and take the closest whole number).
     */
    microuptime(&tv);
    curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

    for (i=0; i < 3 ; i++) {
	h = heaps[i];
	while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time) ) {
	    if (h->p[0].key > curr_time)
		printf("dummynet: warning, heap %d is %d ticks late\n",
		    i, (int)(curr_time - h->p[0].key));
	    p = h->p[0].object ;	/* store a copy before heap_extract */
	    heap_extract(h, NULL);	/* need to extract before processing */
	    if (i == 0)
		ready_event(p, &head, &tail) ;
	    else if (i == 1) {
		struct dn_pipe *pipe = p;
		if (pipe->if_name[0] != '\0')
		    printf("dummynet: bad ready_event_wfq for pipe %s\n",
			pipe->if_name);
		else
		    ready_event_wfq(p, &head, &tail) ;
	    } else
		transmit_event(p, &head, &tail);
	}
    }
    /* sweep pipes trying to expire idle flow_queues */
    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH(pe, &pipehash[i], next)
	    if (pe->idle_heap.elements > 0 &&
		    DN_KEY_LT(pe->idle_heap.p[0].key, pe->V) ) {
		struct dn_flow_queue *q = pe->idle_heap.p[0].object ;

		heap_extract(&(pe->idle_heap), NULL);
		q->S = q->F + 1 ; /* mark timestamp as invalid */
		pe->sum -= q->fs->weight ;
	    }

    /* check the heaps to see if there's still stuff in there, and
     * only set the timer if there are packets to process
     */
    timer_enabled = 0;
    for (i=0; i < 3 ; i++) {
	h = heaps[i];
	if (h->elements > 0) { // set the timer
	    ts.tv_sec = 0;
	    ts.tv_nsec = 1 * 1000000;	// 1ms
	    timer_enabled = 1;
	    bsd_timeout(dummynet, NULL, &ts);
	    break;
	}
    }

    if (head != NULL)
	serialize++;

    lck_mtx_unlock(dn_mutex);

    /* Send out the de-queued list of ready-to-send packets */
    if (head != NULL) {
	dummynet_send(head);
	lck_mtx_lock(dn_mutex);
	serialize--;
	lck_mtx_unlock(dn_mutex);
    }
}
static void
dummynet_send(struct mbuf *m)
{
    struct dn_pkt_tag *pkt;
    struct mbuf *n;

    for (; m != NULL; m = n) {
	n = m->m_nextpkt;
	m->m_nextpkt = NULL;
	pkt = dn_tag_get(m);

	switch (pkt->dn_dir) {
	case DN_TO_IP_OUT: {
	    struct route tmp_rt = pkt->ro;
	    (void)ip_output(m, NULL, &tmp_rt, pkt->flags, NULL, NULL);
	    if (tmp_rt.ro_rt) {
		rtfree(tmp_rt.ro_rt);
		tmp_rt.ro_rt = NULL;
	    }
	    break ;
	}
	case DN_TO_IP_IN :
	    proto_inject(PF_INET, m);
	    break ;
	default:
	    printf("dummynet: bad switch %d!\n", pkt->dn_dir);
	    m_freem(m);
	    break ;
	}
    }
}
/*
 * called by an interface when tx_rdy occurs.
 */
int
if_tx_rdy(struct ifnet *ifp)
{
    struct dn_pipe *p;
    struct mbuf *head = NULL, *tail = NULL;
    int i;

    lck_mtx_lock(dn_mutex);

    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH(p, &pipehash[i], next)
	    if (p->ifp == ifp)
		break ;
    if (p == NULL) {
	char buf[32];
	snprintf(buf, sizeof(buf), "%s%d",ifp->if_name, ifp->if_unit);
	for (i = 0; i < HASHSIZE; i++)
	    SLIST_FOREACH(p, &pipehash[i], next)
		if (!strcmp(p->if_name, buf) ) {
		    p->ifp = ifp ;
		    DPRINTF(("dummynet: ++ tx rdy from %s (now found)\n", buf));
		    break ;
		}
    }
    if (p != NULL) {
	DPRINTF(("dummynet: ++ tx rdy from %s%d - qlen %d\n", ifp->if_name,
	    ifp->if_unit, ifp->if_snd.ifq_len));
	p->numbytes = 0 ; /* mark ready for I/O */
	ready_event_wfq(p, &head, &tail);
    }

    if (head != NULL)
	serialize++;

    lck_mtx_unlock(dn_mutex);

    /* Send out the de-queued list of ready-to-send packets */
    if (head != NULL) {
	dummynet_send(head);
	lck_mtx_lock(dn_mutex);
	serialize--;
	lck_mtx_unlock(dn_mutex);
    }
    return 0;
}
/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
    struct dn_flow_queue *q, *prev ;
    int i, initial_elements = fs->rq_elements ;
    struct timeval timenow;

    /* reviewed for getmicrotime usage */
    getmicrotime(&timenow);

    if (fs->last_expired == timenow.tv_sec)
	return 0 ;
    fs->last_expired = timenow.tv_sec ;
    for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
	for (prev=NULL, q = fs->rq[i] ; q != NULL ; )
	    if (q->head != NULL || q->S != q->F+1) {
		prev = q ;
		q = q->next ;
	    } else { /* entry is idle, expire it */
		struct dn_flow_queue *old_q = q ;

		if (prev != NULL)
		    prev->next = q = q->next ;
		else
		    fs->rq[i] = q = q->next ;
		fs->rq_elements-- ;
		FREE(old_q, M_DUMMYNET);
	    }
    return initial_elements - fs->rq_elements ;
}
/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
    struct dn_flow_queue *q ;

    if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
	    expire_queues(fs) == 0) {
	/*
	 * No way to get room, use or create overflow queue.
	 */
	i = fs->rq_size ;
	if ( fs->rq[i] != NULL )
	    return fs->rq[i] ;
    }
    q = _MALLOC(sizeof(*q), M_DUMMYNET, M_DONTWAIT | M_ZERO);
    if (q == NULL) {
	printf("dummynet: sorry, cannot allocate queue for new flow\n");
	return NULL ;
    }
    q->fs = fs ;
    q->hash_slot = i ;
    q->next = fs->rq[i] ;
    q->S = q->F + 1;   /* hack - mark timestamp as invalid */
    fs->rq[i] = q ;
    fs->rq_elements++ ;
    return q ;
}
/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
{
    int i = 0 ; /* we need i and q for new allocations */
    struct dn_flow_queue *q, *prev;

    if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) )
	q = fs->rq[0] ;
    else {
	/* first, do the masking */
	id->dst_ip &= fs->flow_mask.dst_ip ;
	id->src_ip &= fs->flow_mask.src_ip ;
	id->dst_port &= fs->flow_mask.dst_port ;
	id->src_port &= fs->flow_mask.src_port ;
	id->proto &= fs->flow_mask.proto ;
	id->flags = 0 ; /* we don't care about this one */
	/* then, hash function */
	i = ( (id->dst_ip) & 0xffff ) ^
	    ( (id->dst_ip >> 15) & 0xffff ) ^
	    ( (id->src_ip << 1) & 0xffff ) ^
	    ( (id->src_ip >> 16 ) & 0xffff ) ^
	    (id->dst_port << 1) ^ (id->src_port) ^
	    (id->proto );
	i = i % fs->rq_size ;
	/* finally, scan the current list for a match */
	searches++ ;
	for (prev=NULL, q = fs->rq[i] ; q ; ) {
	    search_steps++;
	    if (id->dst_ip == q->id.dst_ip &&
		    id->src_ip == q->id.src_ip &&
		    id->dst_port == q->id.dst_port &&
		    id->src_port == q->id.src_port &&
		    id->proto == q->id.proto &&
		    id->flags == q->id.flags)
		break ; /* found */
	    else if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
		/* entry is idle and not in any heap, expire it */
		struct dn_flow_queue *old_q = q ;

		if (prev != NULL)
		    prev->next = q = q->next ;
		else
		    fs->rq[i] = q = q->next ;
		fs->rq_elements-- ;
		FREE(old_q, M_DUMMYNET);
		continue ;
	    }
	    prev = q ;
	    q = q->next ;
	}
	if (q && prev != NULL) { /* found and not in front */
	    prev->next = q->next ;
	    q->next = fs->rq[i] ;
	    fs->rq[i] = q ;
	}
    }
    if (q == NULL) { /* no match, need to allocate a new entry */
	q = create_queue(fs, i);
	if (q != NULL)
	    q->id = *id ;
    }
    return q ;
}
static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
    /*
     * RED algorithm.
     *
     * RED calculates the average queue size (avg) using a low-pass filter
     * with an exponential weighted (w_q) moving average:
     *	avg  <-  (1-w_q) * avg + w_q * q_size
     * where q_size is the queue length (measured in bytes or * packets).
     *
     * If q_size == 0, we compute the idle time for the link, and set
     *	avg = (1 - w_q)^(idle/s)
     * where s is the time needed for transmitting a medium-sized packet.
     *
     * Now, if avg < min_th the packet is enqueued.
     * If avg > max_th the packet is dropped. Otherwise, the packet is
     * dropped with probability P function of avg.
     */
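    /*
     * Worked example (added, not in the original file): with w_q = 0.002
     * (SCALE'd fixed point in the real code), an average starting at 0 and
     * a constant q_size of 1000 bytes, successive packets move the average
     *	avg' = avg + w_q*(q_size - avg):  0 -> 2 -> 3.996 -> 5.988 -> ...
     * slowly approaching 1000, which is why RED reacts to sustained
     * congestion rather than to short bursts.
     */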
    int64_t p_b = 0;
    /* queue in bytes or packets ? */
    u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

    DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size));

    /* average queue size estimation */
    if (q_size != 0) {
	/*
	 * queue is not empty, avg <- avg + (q_size - avg) * w_q
	 */
	int diff = SCALE(q_size) - q->avg;
	int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q);

	q->avg += (int) v;
    } else {
	/*
	 * queue is empty, find for how long the queue has been
	 * empty and use a lookup table for computing
	 * (1 - * w_q)^(idle_time/s) where s is the time to send a
	 * (small) packet.
	 * XXX check wraps...
	 */
	if (q->avg) {
	    u_int t = (curr_time - q->q_time) / fs->lookup_step;

	    q->avg = (t < fs->lookup_depth) ?
		    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
	}
    }
    DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));

    /* should i drop ? */

    if (q->avg < fs->min_th) {
	q->count = -1;
	return 0; /* accept packet ; */
    }
    if (q->avg >= fs->max_th) { /* average queue >=  max threshold */
	if (fs->flags_fs & DN_IS_GENTLE_RED) {
	    /*
	     * According to Gentle-RED, if avg is greater than max_th the
	     * packet is dropped with a probability
	     *	p_b = c_3 * avg - c_4
	     * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
	     */
	    p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
	} else {
	    q->count = -1;
	    DPRINTF(("dummynet: - drop"));
	    return 1 ;
	}
    } else if (q->avg > fs->min_th) {
	/*
	 * we compute p_b using the linear dropping function p_b = c_1 *
	 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
	 * max_p * min_th / (max_th - min_th)
	 */
	p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2;
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES)
	p_b = (p_b * len) / fs->max_pkt_size;
    if (++q->count == 0)
	q->random = MY_RANDOM & 0xffff;
    else {
	/*
	 * q->count counts packets arrived since last drop, so a greater
	 * value of q->count means a greater packet drop probability.
	 */
	if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
	    q->count = 0;
	    DPRINTF(("dummynet: - red drop"));
	    /* after a drop we calculate a new random value */
	    q->random = MY_RANDOM & 0xffff;
	    return 1;	/* drop */
	}
    }
    /* end of RED algorithm */
    return 0 ; /* accept */
}
static __inline
struct dn_flow_set *
locate_flowset(int fs_nr)
{
    struct dn_flow_set *fs;

    SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
	if (fs->fs_nr == fs_nr)
	    return fs ;

    return (NULL) ;
}

static __inline struct dn_pipe *
locate_pipe(int pipe_nr)
{
    struct dn_pipe *pipe;

    SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next)
	if (pipe->pipe_nr == pipe_nr)
	    return (pipe);

    return (NULL);
}
/*
 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * pipe_nr	pipe or queue the packet is destined for.
 * dir		where shall we send the packet after dummynet.
 * m		the mbuf with the packet
 * ifp		the 'ifp' parameter from the caller.
 *		NULL in ip_input, destination interface in ip_output,
 *		real_dst in bdg_forward
 * ro		route parameter (only used in ip_output, NULL otherwise)
 * dst		destination address, only used by ip_output
 * rule		matching rule, in case of multiple passes
 * flags	flags from the caller, only used in ip_output
 */
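/*
 * Illustrative note (an assumption, not stated in this file): callers reach
 * this function through the registered ip_dn_io_t hook rather than by name.
 * A classifier that has just matched a dummynet verdict hands the mbuf over
 * roughly as
 *	error = dummynet_io(m, pipe_nr, DN_TO_IP_OUT, &args);
 * with args a filled-in struct ip_fw_args, and must treat the mbuf as
 * consumed afterwards: it is either queued here or freed on the dropit path.
 */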
static int
dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
    struct mbuf *head = NULL, *tail = NULL;
    struct dn_pkt_tag *pkt;
    struct m_tag *mtag;
    struct dn_flow_set *fs = NULL;
    struct dn_pipe *pipe ;
    u_int64_t len = m->m_pkthdr.len ;
    struct dn_flow_queue *q = NULL ;
    int is_pipe;
    struct timespec ts;
    struct timeval tv;

#if IPFW2
    ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs;

    if (cmd->opcode == O_LOG)
	cmd += F_LEN(cmd);
    is_pipe = (cmd->opcode == O_PIPE);
#else
    is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE;
#endif

    pipe_nr &= 0xffff ;

    lck_mtx_lock(dn_mutex);

    /* make all time measurements in milliseconds (ms) -
     * here we convert secs and usecs to msecs (just divide the
     * usecs and take the closest whole number).
     */
    microuptime(&tv);
    curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

    /*
     * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
     */
    if (is_pipe) {
	pipe = locate_pipe(pipe_nr);
	if (pipe != NULL)
	    fs = &(pipe->fs);
    } else
	fs = locate_flowset(pipe_nr);

    if (fs == NULL)
	goto dropit ;	/* this queue/pipe does not exist! */
    pipe = fs->pipe ;
    if (pipe == NULL) { /* must be a queue, try find a matching pipe */
	pipe = locate_pipe(fs->parent_nr);

	if (pipe != NULL)
	    fs->pipe = pipe ;
	else {
	    printf("dummynet: no pipe %d for queue %d, drop pkt\n",
		fs->parent_nr, fs->fs_nr);
	    goto dropit ;
	}
    }
    q = find_queue(fs, &(fwa->f_id));
    if ( q == NULL )
	goto dropit ;		/* cannot allocate queue		*/
    /*
     * update statistics, then check reasons to drop pkt
     */
    q->tot_bytes += len ;
    q->tot_pkts++ ;
    if ( fs->plr && (MY_RANDOM < fs->plr) )
	goto dropit ;		/* random pkt drop			*/
    if ( fs->flags_fs & DN_QSIZE_IS_BYTES) {
	if (q->len_bytes > fs->qsize)
	    goto dropit ;	/* queue size overflow			*/
    } else {
	if (q->len >= fs->qsize)
	    goto dropit ;	/* queue count overflow			*/
    }
    if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) )
	goto dropit ;

    /* XXX expensive to zero, see if we can remove it*/
    mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
	    sizeof(struct dn_pkt_tag), M_NOWAIT, m);
    if ( mtag == NULL )
	goto dropit ;		/* cannot allocate packet header	*/
    m_tag_prepend(m, mtag);	/* attach to mbuf chain */

    pkt = (struct dn_pkt_tag *)(mtag+1);
    bzero(pkt, sizeof(struct dn_pkt_tag));
    /* ok, i can handle the pkt now... */
    /* build and enqueue packet + parameters */
    pkt->rule = fwa->rule ;
    pkt->dn_dir = dir ;

    pkt->ifp = fwa->oif;
    if (dir == DN_TO_IP_OUT) {
	/*
	 * We need to copy *ro because for ICMP pkts (and maybe others)
	 * the caller passed a pointer into the stack; dst might also be
	 * a pointer into *ro so it needs to be updated.
	 */
	pkt->ro = *(fwa->ro);
	if (fwa->ro->ro_rt)
	    RT_ADDREF(fwa->ro->ro_rt);

	if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) /* dst points into ro */
	    fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst) ;

	bcopy (fwa->dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
	pkt->flags = fwa->flags;
	if (fwa->ipoa != NULL)
	    pkt->ipoa = *(fwa->ipoa);
    }
    if (q->head == NULL)
	q->head = m;
    else
	q->tail->m_nextpkt = m;
    q->tail = m;
    q->len++;
    q->len_bytes += len ;

    if ( q->head != m )		/* flow was not idle, we are done */
	goto done;
    /*
     * If we reach this point the flow was previously idle, so we need
     * to schedule it. This involves different actions for fixed-rate or
     * WF2Q queues.
     */
    if (is_pipe) {
	/*
	 * Fixed-rate queue: just insert into the ready_heap.
	 */
	dn_key t = 0 ;
	if (pipe->bandwidth)
	    t = SET_TICKS(m, q, pipe);
	q->sched_time = curr_time ;
	if (t == 0)	/* must process it now */
	    ready_event( q , &head, &tail );
	else
	    heap_insert(&ready_heap, curr_time + t , q );
    } else {
	/*
	 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
	 * set S to the virtual time V for the controlling pipe, and update
	 * the sum of weights for the pipe; otherwise, remove flow from
	 * idle_heap and set S to max(F,V).
	 * Second, compute finish time F = S + len/weight.
	 * Third, if pipe was idle, update V=max(S, V).
	 * Fourth, count one more backlogged flow.
	 */
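	/*
	 * Worked example (added, not in the original sources): suppose the
	 * pipe's virtual time is V = 100 (in the <<MY_M fixed-point units
	 * used here) and an idle flow of weight 2 enqueues a packet whose
	 * scaled length is len<<MY_M == 600. Then
	 *	S = V = 100,  F = S + 600/2 = 400,
	 * while a concurrent flow of weight 1 with the same packet would get
	 * F = 100 + 600 = 700, so the heavier flow finishes first -- exactly
	 * the weighted service WF2Q is meant to provide.
	 */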
	if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
	    q->S = pipe->V ;
	    pipe->sum += fs->weight ; /* add weight of new queue */
	} else {
	    heap_extract(&(pipe->idle_heap), q);
	    q->S = MAX64(q->F, pipe->V) ;
	}
	q->F = q->S + ( len<<MY_M )/(u_int64_t) fs->weight;

	if (pipe->not_eligible_heap.elements == 0 &&
		pipe->scheduler_heap.elements == 0)
	    pipe->V = MAX64 ( q->S, pipe->V );
	fs->backlogged++ ;
	/*
	 * Look at eligibility. A flow is not eligibile if S>V (when
	 * this happens, it means that there is some other flow already
	 * scheduled for the same pipe, so the scheduler_heap cannot be
	 * empty). If the flow is not eligible we just store it in the
	 * not_eligible_heap. Otherwise, we store in the scheduler_heap
	 * and possibly invoke ready_event_wfq() right now if there is
	 * leftover credit.
	 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
	 * and for all flows in not_eligible_heap (NEH), S_i > V .
	 * So when we need to compute max( V, min(S_i) ) forall i in SCH+NEH,
	 * we only need to look into NEH.
	 */
	if (DN_KEY_GT(q->S, pipe->V) ) { /* not eligible */
	    if (pipe->scheduler_heap.elements == 0)
		printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
	    heap_insert(&(pipe->not_eligible_heap), q->S, q);
	} else {
	    heap_insert(&(pipe->scheduler_heap), q->F, q);
	    if (pipe->numbytes >= 0) { /* pipe is idle */
		if (pipe->scheduler_heap.elements != 1)
		    printf("dummynet: OUCH! pipe should have been idle!\n");
		DPRINTF(("dummynet: waking up pipe %d at %d\n",
		    pipe->pipe_nr, (int)(q->F >> MY_M)));
		pipe->sched_time = curr_time ;
		ready_event_wfq(pipe, &head, &tail);
	    }
	}
    }
done:
    /* start the timer and set global if not already set */
    if (!timer_enabled) {
	ts.tv_sec = 0;
	ts.tv_nsec = 1 * 1000000;	// 1ms
	timer_enabled = 1;
	bsd_timeout(dummynet, NULL, &ts);
    }

    lck_mtx_unlock(dn_mutex);

    if (head != NULL)
	dummynet_send(head);

    return 0;

dropit:
    if (q)
	q->drops++ ;
    lck_mtx_unlock(dn_mutex);
    m_freem(m);
    return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
}
/*
 * Below, the rtfree is only needed when (pkt->dn_dir == DN_TO_IP_OUT)
 * Doing this would probably save us the initial bzero of dn_pkt
 */
#define	DN_FREE_PKT(_m) do {					\
	struct m_tag *tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL); \
	if (tag) {						\
		struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag+1); \
		if (n->ro.ro_rt != NULL) {			\
			rtfree(n->ro.ro_rt);			\
			n->ro.ro_rt = NULL;			\
		}						\
	}							\
	m_tag_delete(_m, tag);					\
	m_freem(_m);						\
} while (0)
/*
 * Dispose all packets and flow_queues on a flow_set.
 * If all=1, also remove red lookup table and other storage,
 * including the descriptor itself.
 * For the one in dn_pipe MUST also cleanup ready_heap...
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
    struct dn_flow_queue *q, *qn ;
    int i ;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0 ; i <= fs->rq_size ; i++ ) {
	for (q = fs->rq[i] ; q ; q = qn ) {
	    struct mbuf *m, *mnext;

	    mnext = q->head;
	    while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		DN_FREE_PKT(m);
	    }
	    qn = q->next ;
	    FREE(q, M_DUMMYNET);
	}
	fs->rq[i] = NULL ;
    }
    fs->rq_elements = 0 ;
    if (all) {
	/* RED - free lookup table */
	if (fs->w_q_lookup)
	    FREE(fs->w_q_lookup, M_DUMMYNET);
	if (fs->rq)
	    FREE(fs->rq, M_DUMMYNET);
	/* if this fs is not part of a pipe, free it */
	if (fs->pipe && fs != &(fs->pipe->fs) )
	    FREE(fs, M_DUMMYNET);
    }
}
/*
 * Dispose all packets queued on a pipe (not a flow_set).
 * Also free all resources associated to a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
    struct mbuf *m, *mnext;

    purge_flow_set( &(pipe->fs), 1 );

    mnext = pipe->head;
    while ((m = mnext) != NULL) {
	mnext = m->m_nextpkt;
	DN_FREE_PKT(m);
    }

    heap_free( &(pipe->scheduler_heap) );
    heap_free( &(pipe->not_eligible_heap) );
    heap_free( &(pipe->idle_heap) );
}
/*
 * Delete all pipes and heaps returning memory. Must also
 * remove references from all ipfw rules to all pipes.
 */
static void
dummynet_flush(void)
{
    struct dn_pipe *pipe, *pipe1;
    struct dn_flow_set *fs, *fs1;
    int i;

    lck_mtx_lock(dn_mutex);

    /* remove all references to pipes ...*/
    flush_pipe_ptrs(NULL);

    /* Free heaps so we don't have unwanted events. */
    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);

    /*
     * Now purge all queued pkts and delete all pipes.
     *
     * XXXGL: can we merge the for(;;) cycles into one or not?
     */
    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
	    SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
	    purge_flow_set(fs, 1);
	}
    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
	    SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
	    purge_pipe(pipe);
	    FREE(pipe, M_DUMMYNET);
	}

    lck_mtx_unlock(dn_mutex);
}
extern struct ip_fw *ip_fw_default_rule ;

static void
dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
{
    int i ;
    struct dn_flow_queue *q ;
    struct mbuf *m ;

    for (i = 0 ; i <= fs->rq_size ; i++) /* last one is ovflow */
	for (q = fs->rq[i] ; q ; q = q->next )
	    for (m = q->head ; m ; m = m->m_nextpkt ) {
		struct dn_pkt_tag *pkt = dn_tag_get(m) ;
		if (pkt->rule == r)
		    pkt->rule = ip_fw_default_rule ;
	    }
}

/*
 * when a firewall rule is deleted, scan all queues and remove the flow-id
 * from packets matching this rule.
 */
static void
dn_rule_delete(void *r)
{
    struct dn_pipe *p ;
    struct dn_flow_set *fs ;
    struct dn_pkt_tag *pkt ;
    struct mbuf *m ;
    int i;

    lck_mtx_lock(dn_mutex);

    /*
     * If the rule references a queue (dn_flow_set), then scan
     * the flow set, otherwise scan pipes. Should do either, but doing
     * both does not harm.
     */
    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH(fs, &flowsethash[i], next)
	    dn_rule_delete_fs(fs, r);

    for (i = 0; i < HASHSIZE; i++)
	SLIST_FOREACH(p, &pipehash[i], next) {
	    fs = &(p->fs);
	    dn_rule_delete_fs(fs, r);
	    for (m = p->head ; m ; m = m->m_nextpkt ) {
		pkt = dn_tag_get(m);
		if (pkt->rule == r)
		    pkt->rule = ip_fw_default_rule;
	    }
	}

    lck_mtx_unlock(dn_mutex);
}
/*
 * setup RED parameters
 */
static int
config_red(struct dn_flow_set *p, struct dn_flow_set * x)
{
    int i;

    x->w_q = p->w_q;
    x->min_th = SCALE(p->min_th);
    x->max_th = SCALE(p->max_th);
    x->max_p = p->max_p;

    x->c_1 = p->max_p / (p->max_th - p->min_th);
    x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
    if (x->flags_fs & DN_IS_GENTLE_RED) {
	x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
	x->c_4 = (SCALE(1) - 2 * p->max_p);
    }

    /* if the lookup table already exist, free and create it again */
    if (x->w_q_lookup) {
	FREE(x->w_q_lookup, M_DUMMYNET);
	x->w_q_lookup = NULL ;
    }
    if (red_lookup_depth == 0) {
	printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
	FREE(x, M_DUMMYNET);
	return EINVAL;
    }
    x->lookup_depth = red_lookup_depth;
    x->w_q_lookup = (u_int *) _MALLOC(x->lookup_depth * sizeof(int),
	    M_DUMMYNET, M_DONTWAIT);
    if (x->w_q_lookup == NULL) {
	printf("dummynet: sorry, cannot allocate red lookup table\n");
	FREE(x, M_DUMMYNET);
	return ENOSPC;
    }

    /* fill the lookup table with (1 - w_q)^x */
    x->lookup_step = p->lookup_step ;
    x->lookup_weight = p->lookup_weight ;
    x->w_q_lookup[0] = SCALE(1) - x->w_q;
    for (i = 1; i < x->lookup_depth; i++)
	x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
    if (red_avg_pkt_size < 1)
	red_avg_pkt_size = 512 ;
    x->avg_pkt_size = red_avg_pkt_size ;
    if (red_max_pkt_size < 1)
	red_max_pkt_size = 1500 ;
    x->max_pkt_size = red_max_pkt_size ;
    return 0 ;
}
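/*
 * Worked example (added, not in the original file; assumes the userland
 * config passes lookup_weight == SCALE(1 - w_q)): the loop above then fills
 *	w_q_lookup[i] == SCALE((1 - w_q)^(i+1))
 * e.g. for w_q = 0.002: 0.998, 0.996004, 0.994012, ...
 * which lets red_drops() decay the average of a queue that has been idle
 * for t lookup steps with a single SCALE_MUL(avg, w_q_lookup[t]) instead of
 * multiplying by (1 - w_q) t times.
 */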
static int
alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
{
    if (x->flags_fs & DN_HAVE_FLOW_MASK) {	/* allocate some slots */
	int l = pfs->rq_size;

	if (l == 0)
	    l = dn_hash_size;
	if (l < 4)
	    l = 4;
	else if (l > DN_MAX_HASH_SIZE)
	    l = DN_MAX_HASH_SIZE;
	x->rq_size = l;
    } else			/* one is enough for null mask */
	x->rq_size = 1;
    x->rq = _MALLOC((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
	    M_DUMMYNET, M_DONTWAIT | M_ZERO);
    if (x->rq == NULL) {
	printf("dummynet: sorry, cannot allocate queue\n");
	return ENOSPC;
    }
    x->rq_elements = 0;
    return 0 ;
}
static void
set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
{
    x->flags_fs = src->flags_fs;
    x->qsize = src->qsize;
    x->plr = src->plr;
    x->flow_mask = src->flow_mask;
    if (x->flags_fs & DN_QSIZE_IS_BYTES) {
        if (x->qsize > 1024*1024)
            x->qsize = 1024*1024 ;
    } else {
        if (x->qsize == 0 || x->qsize > 100)
            x->qsize = 50 ;
    }
    /* configuring RED */
    if ( x->flags_fs & DN_IS_RED )
        config_red(src, x) ; /* XXX should check errors */
}
/*
 * setup pipe or queue parameters.
 */
static int
config_pipe(struct dn_pipe *p)
{
    int i, r;
    struct dn_flow_set *pfs = &(p->fs);
    struct dn_flow_queue *q;

    /*
     * The config program passes parameters as follows:
     * bw = bits/second (0 means no limits),
     * delay = ms, must be translated into ticks.
     * qsize = slots/bytes
     */
    p->delay = ( p->delay * (hz*10) ) / 1000 ;
    /* We need either a pipe number or a flow_set number */
    if (p->pipe_nr == 0 && pfs->fs_nr == 0)
        return EINVAL ;
    if (p->pipe_nr != 0 && pfs->fs_nr != 0)
        return EINVAL ;
    if (p->pipe_nr != 0) { /* this is a pipe */
        struct dn_pipe *x, *b;

        lck_mtx_lock(dn_mutex);

        /* locate pipe */
        b = locate_pipe(p->pipe_nr);

        if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
            x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO) ;
            if (x == NULL) {
                lck_mtx_unlock(dn_mutex);
                printf("dummynet: no memory for new pipe\n");
                return ENOSPC ;
            }
            x->pipe_nr = p->pipe_nr;
            x->fs.pipe = x ;
            /* idle_heap is the only one from which we extract from the middle.
             */
            x->idle_heap.size = x->idle_heap.elements = 0 ;
            x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
        } else {
            x = b;
            /* Flush accumulated credit for all queues */
            for (i = 0; i <= x->fs.rq_size; i++)
                for (q = x->fs.rq[i]; q; q = q->next)
                    q->numbytes = 0;
        }

        x->bandwidth = p->bandwidth ;
        x->numbytes = 0; /* just in case... */
        bcopy(p->if_name, x->if_name, sizeof(p->if_name) );
        x->ifp = NULL ; /* reset interface ptr */
        x->delay = p->delay ;
        set_fs_parms(&(x->fs), pfs);

        if ( x->fs.rq == NULL ) { /* a new pipe */
            r = alloc_hash(&(x->fs), pfs) ;
            if (r) {
                lck_mtx_unlock(dn_mutex);
                FREE(x, M_DUMMYNET);
                return r ;
            }
            SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)], x, next);
        }
        lck_mtx_unlock(dn_mutex);
    } else { /* config queue */
        struct dn_flow_set *x, *b ;

        lck_mtx_lock(dn_mutex);
        /* locate flow_set */
        b = locate_flowset(pfs->fs_nr);

        if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */
            if (pfs->parent_nr == 0) { /* need link to a pipe */
                lck_mtx_unlock(dn_mutex);
                return EINVAL ;
            }
            x = _MALLOC(sizeof(struct dn_flow_set), M_DUMMYNET, M_DONTWAIT | M_ZERO);
            if (x == NULL) {
                lck_mtx_unlock(dn_mutex);
                printf("dummynet: no memory for new flow_set\n");
                return ENOSPC ;
            }
            x->fs_nr = pfs->fs_nr;
            x->parent_nr = pfs->parent_nr;
            x->weight = pfs->weight ;
            if (x->weight == 0)
                x->weight = 1 ;
            else if (x->weight > 100)
                x->weight = 100 ;
        } else {
            /* Change parent pipe not allowed; must delete and recreate */
            if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
                lck_mtx_unlock(dn_mutex);
                return EINVAL ;
            }
            x = b;
        }
        set_fs_parms(x, pfs);

        if ( x->rq == NULL ) { /* a new flow_set */
            r = alloc_hash(x, pfs) ;
            if (r) {
                lck_mtx_unlock(dn_mutex);
                FREE(x, M_DUMMYNET);
                return r ;
            }
            SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)], x, next);
        }
        lck_mtx_unlock(dn_mutex);
    }
    return 0 ;
}
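/*
 * Note: one sockopt carries both pipe and queue configuration; pipes and
 * flow_sets live in separate namespaces, which is why exactly one of
 * pipe_nr and fs_nr must be non-zero above.  Reconfiguring an existing
 * pipe deliberately flushes the accumulated credit (numbytes), so a
 * bandwidth change takes effect immediately rather than after the old
 * credit drains.
 */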
/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
    int i = 0, found = 0 ;
    for (; i < h->elements ;)
        if ( ((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
            h->elements-- ;
            h->p[i] = h->p[h->elements] ;
            found++ ;
        } else
            i++ ;
    if (found)
        heapify(h);
}
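/*
 * Note: each removal above overwrites the hole with the last heap entry
 * and retests the same index, keeping the sweep O(elements); the heap
 * property is restored with a single heapify() at the end instead of
 * re-heapifying after every removal.
 */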
/*
 * helper function to remove a pipe from a heap (can be there at most once)
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
    if (h->elements > 0) {
        int i ;
        for (i = 0; i < h->elements ; i++ ) {
            if (h->p[i].object == p) { /* found it */
                h->elements-- ;
                h->p[i] = h->p[h->elements] ;
                heapify(h) ;
                break ;
            }
        }
    }
}
/*
 * drain all queues. Called in case of severe mbuf shortage.
 */
void
dummynet_drain(void)
{
    struct dn_flow_set *fs;
    struct dn_pipe *p;
    struct mbuf *m, *mnext;
    int i;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);
    /* remove all references to this pipe from flow_sets */
    for (i = 0; i < HASHSIZE; i++)
        SLIST_FOREACH(fs, &flowsethash[i], next)
            purge_flow_set(fs, 0);

    for (i = 0; i < HASHSIZE; i++)
        SLIST_FOREACH(p, &pipehash[i], next) {
            purge_flow_set(&(p->fs), 0);

            mnext = p->head;
            while ((m = mnext) != NULL) {
                mnext = m->m_nextpkt ;
                DN_FREE_PKT(m);
            }
            p->head = p->tail = NULL ;
        }
}
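/*
 * XXX: the caller must already hold dn_mutex (hence the assert above).
 * The delay-line walk saves m_nextpkt before freeing each packet, since
 * the free releases the mbuf; DN_FREE_PKT() is assumed here to be the
 * same packet-freeing macro used elsewhere in this file.
 */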
/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(struct dn_pipe *p)
{
    int i;

    if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
        return EINVAL ;
    if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
        return EINVAL ;
    if (p->pipe_nr != 0) { /* this is an old-style pipe */
        struct dn_pipe *b;
        struct dn_flow_set *fs;

        lck_mtx_lock(dn_mutex);
        /* locate pipe */
        b = locate_pipe(p->pipe_nr);
        if (b == NULL) {
            lck_mtx_unlock(dn_mutex);
            return EINVAL ; /* not found */
        }

        /* Unlink from list of pipes. */
        SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);

        /* remove references to this pipe from the ip_fw rules. */
        flush_pipe_ptrs(&(b->fs));

        /* Remove all references to this pipe from flow_sets. */
        for (i = 0; i < HASHSIZE; i++)
            SLIST_FOREACH(fs, &flowsethash[i], next)
                if (fs->pipe == b) {
                    printf("dummynet: ++ ref to pipe %d from fs %d\n",
                        p->pipe_nr, fs->fs_nr);
                    fs->pipe = NULL ;
                    purge_flow_set(fs, 0);
                }
        fs_remove_from_heap(&ready_heap, &(b->fs));

        purge_pipe(b); /* remove all data associated to this pipe */
        /* remove reference to here from extract_heap and wfq_ready_heap */
        pipe_remove_from_heap(&extract_heap, b);
        pipe_remove_from_heap(&wfq_ready_heap, b);
        lck_mtx_unlock(dn_mutex);

        FREE(b, M_DUMMYNET);
    } else { /* this is a WF2Q queue (dn_flow_set) */
        struct dn_flow_set *b;

        lck_mtx_lock(dn_mutex);
        /* locate flow_set */
        b = locate_flowset(p->fs.fs_nr);
        if (b == NULL) {
            lck_mtx_unlock(dn_mutex);
            return EINVAL ; /* not found */
        }

        /* remove references to this flow_set from the ip_fw rules. */
        flush_pipe_ptrs(b);

        /* Unlink from list of flowsets. */
        SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);

        if (b->pipe != NULL) {
            /* Update total weight on parent pipe and cleanup parent heaps */
            b->pipe->sum -= b->weight * b->backlogged ;
            fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
            fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
#if 1 /* XXX should i remove from idle_heap as well ? */
            fs_remove_from_heap(&(b->pipe->idle_heap), b);
#endif
        }
        purge_flow_set(b, 1);
        lck_mtx_unlock(dn_mutex);
    }
    return 0 ;
}
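/*
 * Note: for a WF2Q queue the parent pipe maintains sum as the total of
 * weight * backlogged over its child flow_sets; subtracting this queue's
 * contribution before freeing it keeps the scheduler's virtual-time
 * bookkeeping consistent for the queues that remain.
 */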
/*
 * helper function used to copy data from kernel in DUMMYNET_GET reply to
 * userland.
 */
static char *
dn_copy_set_32(struct dn_flow_set *set, char *bp)
{
    int i, copied = 0 ;
    struct dn_flow_queue *q;
    struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)bp;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0 ; i <= set->rq_size ; i++)
        for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
            if (q->hash_slot != i)
                printf("dummynet: ++ at %d: wrong slot (have %d, "
                    "should be %d)\n", copied, q->hash_slot, i);
            if (q->fs != set)
                printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n",
                    i, q->fs, set);
            copied++ ;
            cp_queue_to_32_user( q, qp );
            /* cleanup pointers */
            qp->next = (user32_addr_t)0 ;
            qp->head = qp->tail = (user32_addr_t)0 ;
            qp->fs = (user32_addr_t)0 ;
        }
    if (copied != set->rq_elements)
        printf("dummynet: ++ wrong count, have %d should be %d\n",
            copied, set->rq_elements);
    return (char *)qp ;
}
static char *
dn_copy_set_64(struct dn_flow_set *set, char *bp)
{
    int i, copied = 0 ;
    struct dn_flow_queue *q;
    struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)bp;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0 ; i <= set->rq_size ; i++)
        for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
            if (q->hash_slot != i)
                printf("dummynet: ++ at %d: wrong slot (have %d, "
                    "should be %d)\n", copied, q->hash_slot, i);
            if (q->fs != set)
                printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n",
                    i, q->fs, set);
            copied++ ;
            //bcopy(q, qp, sizeof(*q));
            cp_queue_to_64_user( q, qp );
            /* cleanup pointers */
            qp->next = USER_ADDR_NULL ;
            qp->head = qp->tail = USER_ADDR_NULL ;
            qp->fs = USER_ADDR_NULL ;
        }
    if (copied != set->rq_elements)
        printf("dummynet: ++ wrong count, have %d should be %d\n",
            copied, set->rq_elements);
    return (char *)qp ;
}
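/*
 * Note: both copy helpers scrub the list and queue pointers (next, head,
 * tail, fs) in the user-visible copy, replacing them with null user
 * addresses, so kernel addresses held in those fields are not exported
 * to userland; DUMMYNET_GET consumers only need the counters and
 * statistics anyway.
 */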
static int
dn_calc_size(int is64user)
{
    struct dn_flow_set *set ;
    struct dn_pipe *p ;
    int size = 0 ;
    int i;
    int pipesize;
    int queuesize;
    int setsize;

    lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
    if ( is64user ) {
        pipesize = sizeof(struct dn_pipe_64);
        queuesize = sizeof(struct dn_flow_queue_64);
        setsize = sizeof(struct dn_flow_set_64);
    } else {
        pipesize = sizeof(struct dn_pipe_32);
        queuesize = sizeof(struct dn_flow_queue_32);
        setsize = sizeof(struct dn_flow_set_32);
    }
    /*
     * compute size of data structures: list of pipes and flow_sets.
     */
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH(p, &pipehash[i], next)
            size += pipesize +
                p->fs.rq_elements * queuesize;
        SLIST_FOREACH(set, &flowsethash[i], next)
            size += setsize +
                set->rq_elements * queuesize;
    }
    return size;
}
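/*
 * Note: the accumulation uses the user-layout sizes (pipesize, queuesize,
 * setsize) selected above rather than sizeof() on the kernel structures.
 * Sizing the buffer from the kernel structs would undercount whenever the
 * user layout is wider than the kernel one, and dummynet_get() would then
 * overrun the buffer it allocates from this estimate.
 */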
static int
dummynet_get(struct sockopt *sopt)
{
    char *buf, *bp = NULL ; /* bp is the "copy-pointer" */
    size_t size ;
    struct dn_flow_set *set ;
    struct dn_pipe *p ;
    int error = 0, i ;
    int is64user = 0;

    /* XXX lock held too long */
    lck_mtx_lock(dn_mutex);
    /*
     * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we
     *      cannot use this flag while holding a mutex.
     */
    if (proc_is64bit(sopt->sopt_p))
        is64user = 1;
    for (i = 0; i < 10; i++) {
        size = dn_calc_size(is64user);
        lck_mtx_unlock(dn_mutex);
        buf = _MALLOC(size, M_TEMP, M_WAITOK);
        if (buf == NULL)
            return ENOBUFS ;
        lck_mtx_lock(dn_mutex);
        if (size == dn_calc_size(is64user))
            break;
        FREE(buf, M_TEMP);
        buf = NULL;
    }
    if (buf == NULL) {
        lck_mtx_unlock(dn_mutex);
        return ENOBUFS ;
    }

    bp = buf;
    for (i = 0; i < HASHSIZE; i++)
        SLIST_FOREACH(p, &pipehash[i], next) {
            /*
             * copy pipe descriptor into *bp, convert delay back to ms,
             * then copy the flow_set descriptor(s) one at a time.
             * After each flow_set, copy the queue descriptor it owns.
             */
            if ( is64user )
                bp = cp_pipe_to_64_user(p, (struct dn_pipe_64 *)bp);
            else
                bp = cp_pipe_to_32_user(p, (struct dn_pipe_32 *)bp);
        }
    for (i = 0; i < HASHSIZE; i++)
        SLIST_FOREACH(set, &flowsethash[i], next) {
            struct dn_flow_set_64 *fs_bp = (struct dn_flow_set_64 *)bp ;
            cp_flow_set_to_64_user(set, fs_bp);
            /* XXX same hack as above */
            fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE);
            fs_bp->pipe = USER_ADDR_NULL;
            fs_bp->rq = USER_ADDR_NULL ;
            bp += sizeof(struct dn_flow_set_64);
            bp = dn_copy_set_64( set, bp );
        }
    lck_mtx_unlock(dn_mutex);

    error = sooptcopyout(sopt, buf, size);
    FREE(buf, M_TEMP);
    return error ;
}
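/*
 * Note: the retry loop above is the usual unlock/allocate/relock dance;
 * dn_mutex cannot be held across an M_WAITOK allocation, so the size is
 * recomputed after relocking and the buffer is discarded and re-sized if
 * the pipe/flow_set population changed in the window (up to 10 attempts).
 * XXX the flow_set loop emits only the 64-bit layout; a 32-bit path
 * mirroring the pipe loop would presumably use dn_copy_set_32() here.
 */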
/*
 * Handler for the various dummynet socket options (get, flush, config, del)
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
    int error = 0 ;
    struct dn_pipe *p, tmp_pipe ;

    /* Disallow sets in really-really secure mode. */
    if (sopt->sopt_dir == SOPT_SET && securelevel >= 3)
        return (EPERM);

    switch (sopt->sopt_name) {
    default :
        printf("dummynet: -- unknown option %d\n", sopt->sopt_name);
        return EINVAL ;

    case IP_DUMMYNET_GET :
        error = dummynet_get(sopt);
        break ;

    case IP_DUMMYNET_FLUSH :
        dummynet_flush() ;
        break ;

    case IP_DUMMYNET_CONFIGURE :
        p = &tmp_pipe ;
        if (proc_is64bit(sopt->sopt_p))
            error = cp_pipe_from_user_64( sopt, p );
        else
            error = cp_pipe_from_user_32( sopt, p );
        if (error)
            break ;
        error = config_pipe(p);
        break ;

    case IP_DUMMYNET_DEL : /* remove a pipe or queue */
        p = &tmp_pipe ;
        if (proc_is64bit(sopt->sopt_p))
            error = cp_pipe_from_user_64( sopt, p );
        else
            error = cp_pipe_from_user_32( sopt, p );
        if (error)
            break ;
        error = delete_pipe(p);
        break ;
    }
    return error ;
}
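/*
 * Userland reaches this handler through IPPROTO_IP socket options on a
 * raw socket.  A minimal sketch of a configure call (field values are
 * illustrative only; ipfw(8) normally performs this on your behalf):
 *
 *	struct dn_pipe pipe;
 *	bzero(&pipe, sizeof(pipe));
 *	pipe.pipe_nr = 1;
 *	pipe.bandwidth = 1000000;	// bits/sec, 0 = unlimited
 *	pipe.delay = 50;		// ms, converted to ticks in config_pipe()
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE, &pipe, sizeof(pipe));
 */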
void
ip_dn_init(void)
{
    /* setup locks */
    dn_mutex_grp_attr = lck_grp_attr_alloc_init();
    dn_mutex_grp = lck_grp_alloc_init("dn", dn_mutex_grp_attr);
    dn_mutex_attr = lck_attr_alloc_init();

    if ((dn_mutex = lck_mtx_alloc_init(dn_mutex_grp, dn_mutex_attr)) == NULL) {
        printf("ip_dn_init: can't alloc dn_mutex\n");
        return;
    }

    ready_heap.size = ready_heap.elements = 0 ;
    ready_heap.offset = 0 ;

    wfq_ready_heap.size = wfq_ready_heap.elements = 0 ;
    wfq_ready_heap.offset = 0 ;

    extract_heap.size = extract_heap.elements = 0 ;
    extract_heap.offset = 0 ;

    ip_dn_ctl_ptr = ip_dn_ctl;
    ip_dn_io_ptr = dummynet_io;
    ip_dn_ruledel_ptr = dn_rule_delete;
}
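/*
 * Note: dummynet is wired to ip_fw through these three function pointers
 * (control sockopts, the packet I/O path, and the rule-deletion callback)
 * rather than direct calls, so the firewall code does not take a hard
 * link-time dependency on this module.
 */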