X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..0a7de7458d150b5d4dffc935ba399be265ef0a1a:/bsd/netinet/ip_dummynet.c?ds=sidebyside diff --git a/bsd/netinet/ip_dummynet.c b/bsd/netinet/ip_dummynet.c index 090c692bc..c9f566822 100644 --- a/bsd/netinet/ip_dummynet.c +++ b/bsd/netinet/ip_dummynet.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2008 Apple Inc. All rights reserved. + * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -85,6 +85,7 @@ #include #include /* XXX */ #include +#include #include #include #include @@ -92,6 +93,11 @@ #include #include #include +#if DUMMYNET +#include +#endif /* DUMMYNET */ +#include +#include #include #include #include @@ -100,10 +106,10 @@ #include #include -#if BRIDGE -#include /* for struct arpcom */ -#include -#endif +#include /* for ip6_input, ip6_output prototypes */ +#include + +static struct ip_fw default_rule; /* * We keep a private variable for the simulation time, but we could @@ -113,7 +119,7 @@ static dn_key curr_time = 0 ; /* current simulation time */ /* this is for the timer that fires to call dummynet() - we only enable the timer when there are packets to process, otherwise it's disabled */ -static int timer_enabled = 0; +static int timer_enabled = 0; static int dn_hash_size = 64 ; /* default hash size */ @@ -126,6 +132,8 @@ static int red_lookup_depth = 256; /* RED - default lookup table depth */ static int red_avg_pkt_size = 512; /* RED - default medium packet size */ static int red_max_pkt_size = 1500; /* RED - default max packet size */ +static int serialize = 0; + /* * Three heaps contain queues and pipes that the scheduler handles: * @@ -150,54 +158,50 @@ static void ready_event(struct dn_flow_queue *q, struct mbuf **head, static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail); -/* +/* * Packets are retrieved from queues in Dummynet in chains instead of * packet-by-packet. The entire list of packets is first dequeued and * sent out by the following function. 
*/ static void dummynet_send(struct mbuf *m); -/* Flag to signify the existance of a dequeued packet chain */ -static int serialize = 0; - #define HASHSIZE 16 #define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f) static struct dn_pipe_head pipehash[HASHSIZE]; /* all pipes */ static struct dn_flow_set_head flowsethash[HASHSIZE]; /* all flowsets */ - #ifdef SYSCTL_NODE SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, - CTLFLAG_RW, 0, "Dummynet"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, - CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size"); + CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size"); SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time, - CTLFLAG_RD, &curr_time, "Current tick"); + CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, - CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap"); + CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, - CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap"); + CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, - CTLFLAG_RD, &searches, 0, "Number of queue searches"); + CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, - CTLFLAG_RD, &search_steps, 0, "Number of queue search steps"); + CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, - CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty"); + CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, - CTLFLAG_RW, &dn_max_ratio, 0, + CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, - CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, - CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, - CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size"); #endif #ifdef DUMMYNET_DEBUG int dummynet_debug = 0; #ifdef SYSCTL_NODE -SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug, +SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug, 0, "control debugging printfs"); #endif #define DPRINTF(X) if (dummynet_debug) printf X @@ -205,18 +209,12 @@ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug, #define DPRINTF(X) #endif -/* contrary to the comment above random(), it does not actually - * return a value [0, 2^31 - 1], which breaks plr amongst other - * things. Masking it should work even if the behavior of - * the function is fixed. 
- */ -#define MY_RANDOM (random() & 0x7FFFFFFF) - /* dummynet lock */ static lck_grp_t *dn_mutex_grp; static lck_grp_attr_t *dn_mutex_grp_attr; static lck_attr_t *dn_mutex_attr; -static lck_mtx_t *dn_mutex; +decl_lck_mtx_data(static, dn_mutex_data); +static lck_mtx_t *dn_mutex = &dn_mutex_data; static int config_pipe(struct dn_pipe *p); static int ip_dn_ctl(struct sockopt *sopt); @@ -225,9 +223,6 @@ static void dummynet(void *); static void dummynet_flush(void); void dummynet_drain(void); static ip_dn_io_t dummynet_io; -static void dn_rule_delete(void *); - -int if_tx_rdy(struct ifnet *ifp); static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp); static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp); @@ -241,6 +236,16 @@ static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp); static char* dn_copy_set_32(struct dn_flow_set *set, char *bp); static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ); +struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt; + +uint32_t my_random(void) +{ + uint32_t val; + read_frandom(&val, sizeof(val)); + val &= 0x7FFFFFFF; + + return (val); +} /* * Heap management functions. @@ -266,7 +271,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ) { struct dn_pipe_32 user_pipe_32; int error=0; - + error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32)); if ( !error ){ p->pipe_nr = user_pipe_32.pipe_nr; @@ -278,7 +283,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ) p->sched_time = user_pipe_32.sched_time; bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ); p->ready = user_pipe_32.ready; - + p->fs.fs_nr = user_pipe_32.fs.fs_nr; p->fs.flags_fs = user_pipe_32.fs.flags_fs; p->fs.parent_nr = user_pipe_32.fs.parent_nr; @@ -312,7 +317,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p ) { struct dn_pipe_64 user_pipe_64; int error=0; - + error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64)); if ( !error ){ p->pipe_nr = user_pipe_64.pipe_nr; @@ -324,7 +329,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p ) p->sched_time = user_pipe_64.sched_time; bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ); p->ready = user_pipe_64.ready; - + p->fs.fs_nr = user_pipe_64.fs.fs_nr; p->fs.flags_fs = user_pipe_64.fs.flags_fs; p->fs.parent_nr = user_pipe_64.fs.parent_nr; @@ -459,9 +464,10 @@ static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) { char *bp; - + pipe_bp->pipe_nr = p->pipe_nr; pipe_bp->bandwidth = p->bandwidth; + pipe_bp->delay = p->delay; bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32)); pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p); bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32)); @@ -475,10 +481,10 @@ char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ); pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp); pipe_bp->ready = p->ready; - + cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs)); - - pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; + + pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; /* * XXX the following is a hack based on ->next being the * first field in dn_pipe and dn_flow_set. 
The correct @@ -499,9 +505,10 @@ static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) { char *bp; - + pipe_bp->pipe_nr = p->pipe_nr; pipe_bp->bandwidth = p->bandwidth; + pipe_bp->delay = p->delay; bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64)); pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p); bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64)); @@ -515,10 +522,10 @@ char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ); pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp); pipe_bp->ready = p->ready; - + cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs)); - - pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; + + pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; /* * XXX the following is a hack based on ->next being the * first field in dn_pipe and dn_flow_set. The correct @@ -619,7 +626,8 @@ heap_extract(struct dn_heap *h, void *obj) int child, father, maxelt = h->elements - 1 ; if (maxelt < 0) { - printf("dummynet: warning, extract from empty heap 0x%p\n", h); + printf("dummynet: warning, extract from empty heap 0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(h)); return ; } father = 0 ; /* default: move up smallest child */ @@ -653,47 +661,6 @@ heap_extract(struct dn_heap *h, void *obj) } } -#if 0 -/* - * change object position and update references - * XXX this one is never used! - */ -static void -heap_move(struct dn_heap *h, dn_key new_key, void *object) -{ - int temp; - int i ; - int maxelt = h->elements-1 ; - struct dn_heap_entry buf ; - - if (h->offset <= 0) - panic("cannot move items on this heap"); - - i = *((int *)((char *)object + h->offset)); - if (DN_KEY_LT(new_key, h->p[i].key) ) { /* must move up */ - h->p[i].key = new_key ; - for (; i>0 && DN_KEY_LT(new_key, h->p[(temp = HEAP_FATHER(i))].key) ; - i = temp ) { /* bubble up */ - HEAP_SWAP(h->p[i], h->p[temp], buf) ; - SET_OFFSET(h, i); - } - } else { /* must move down */ - h->p[i].key = new_key ; - while ( (temp = HEAP_LEFT(i)) <= maxelt ) { /* found left child */ - if ((temp != maxelt) && DN_KEY_GT(h->p[temp].key, h->p[temp+1].key)) - temp++ ; /* select child with min key */ - if (DN_KEY_GT(new_key, h->p[temp].key)) { /* go down */ - HEAP_SWAP(h->p[i], h->p[temp], buf) ; - SET_OFFSET(h, i); - } else - break ; - i = temp ; - } - } - SET_OFFSET(h, i); -} -#endif /* heap_move, unused */ - /* * heapify() will reorganize data inside an array to maintain the * heap property. It is needed when we delete a bunch of entries. 
@@ -731,11 +698,13 @@ static struct dn_pkt_tag * dn_tag_get(struct mbuf *m) { struct m_tag *mtag = m_tag_first(m); -/* KASSERT(mtag != NULL && - mtag->m_tag_id == KERNEL_MODULE_TAG_ID && - mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET, - ("packet on dummynet queue w/o dummynet tag!")); -*/ + + if (!(mtag != NULL && + mtag->m_tag_id == KERNEL_MODULE_TAG_ID && + mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) + panic("packet on dummynet queue w/o dummynet tag: 0x%llx", + (uint64_t)VM_KERNEL_ADDRPERM(m)); + return (struct dn_pkt_tag *)(mtag+1); } @@ -760,16 +729,16 @@ dn_tag_get(struct mbuf *m) static void transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) { - struct mbuf *m ; - struct dn_pkt_tag *pkt ; - - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); + struct mbuf *m ; + struct dn_pkt_tag *pkt = NULL; + u_int64_t schedule_time; - /* Extract packets only if no pending chain is being currently processed */ + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); + ASSERT(serialize >= 0); if (serialize == 0) { while ((m = pipe->head) != NULL) { pkt = dn_tag_get(m); - if (!DN_KEY_LEQ(pkt->output_time, curr_time)) + if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) break; pipe->head = m->m_nextpkt; @@ -779,18 +748,22 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) *head = m; *tail = m; } + if (*tail != NULL) (*tail)->m_nextpkt = NULL; } - /* if there are leftover packets, put the pipe into the heap for next ready event */ - if ((m = pipe->head) != NULL) { + schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ? + curr_time + 1 : pkt->dn_output_time; + + /* if there are leftover packets, put the pipe into the heap for next ready event */ + if ((m = pipe->head) != NULL) { pkt = dn_tag_get(m); /* XXX should check errors on heap_insert, by draining the * whole pipe p and hoping in the future we are more successful */ - heap_insert(&extract_heap, pkt->output_time, pipe); - } + heap_insert(&extract_heap, schedule_time, pipe); + } } /* @@ -798,11 +771,11 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) * before being able to transmit a packet. The credit is taken from * either a pipe (WF2Q) or a flow_queue (per-flow queueing) */ - -/* hz is 100, which gives a granularity of 10ms in the old timer. + +/* hz is 100, which gives a granularity of 10ms in the old timer. * The timer has been changed to fire every 1ms, so the use of * hz has been modified here. All instances of hz have been left - * in place but adjusted by a factor of 10 so that hz is functionally + * in place but adjusted by a factor of 10 so that hz is functionally * equal to 1000. 
*/ #define SET_TICKS(_m, q, p) \ @@ -823,7 +796,7 @@ move_pkt(struct mbuf *pkt, struct dn_flow_queue *q, q->len-- ; q->len_bytes -= len ; - dt->output_time = curr_time + p->delay ; + dt->dn_output_time = curr_time + p->delay ; if (p->head == NULL) p->head = pkt; @@ -847,8 +820,8 @@ ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail) struct dn_pipe *p = q->fs->pipe ; int p_was_empty ; - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); - + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); + if (p == NULL) { printf("dummynet: ready_event pipe is gone\n"); return ; @@ -914,12 +887,12 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail) struct dn_heap *neh = &(p->not_eligible_heap) ; int64_t p_numbytes = p->numbytes; - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); - + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); + if (p->if_name[0] == 0) /* tx clock is simulated */ p_numbytes += ( curr_time - p->sched_time ) * p->bandwidth; else { /* tx clock is for real, the ifq must be empty or this is a NOP */ - if (p->ifp && p->ifp->if_snd.ifq_head != NULL) + if (p->ifp && !IFCQ_IS_EMPTY(&p->ifp->if_snd)) return ; else { DPRINTF(("dummynet: pipe %d ready from %s --\n", @@ -1008,14 +981,14 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail) if (p->bandwidth > 0) t = ( p->bandwidth -1 - p_numbytes) / p->bandwidth ; - dn_tag_get(p->tail)->output_time += t ; + dn_tag_get(p->tail)->dn_output_time += t ; p->sched_time = curr_time ; heap_insert(&wfq_ready_heap, curr_time + t, (void *)p); /* XXX should check errors on heap_insert, and drain the whole * queue on error hoping next time we are luckier. */ } - + /* Fit (adjust if necessary) 64bit result into 32bit variable. */ if (p_numbytes > INT_MAX) p->numbytes = INT_MAX; @@ -1054,9 +1027,9 @@ dummynet(__unused void * unused) heaps[2] = &extract_heap ; /* delay line */ lck_mtx_lock(dn_mutex); - - /* make all time measurements in milliseconds (ms) - - * here we convert secs and usecs to msecs (just divide the + + /* make all time measurements in milliseconds (ms) - + * here we convert secs and usecs to msecs (just divide the * usecs and take the closest whole number). 
*/ microuptime(&tv); @@ -1095,9 +1068,9 @@ dummynet(__unused void * unused) q->S = q->F + 1 ; /* mark timestamp as invalid */ pe->sum -= q->fs->weight ; } - - /* check the heaps to see if there's still stuff in there, and - * only set the timer if there are packets to process + + /* check the heaps to see if there's still stuff in there, and + * only set the timer if there are packets to process */ timer_enabled = 0; for (i=0; i < 3 ; i++) { @@ -1111,20 +1084,16 @@ dummynet(__unused void * unused) } } - /* - * If a packet chain has been dequeued, set serialize=1 so that new - * packets don't get dispatched out of turn - */ if (head != NULL) - serialize = 1; + serialize++; - lck_mtx_unlock(dn_mutex); + lck_mtx_unlock(dn_mutex); /* Send out the de-queued list of ready-to-send packets */ if (head != NULL) { dummynet_send(head); lck_mtx_lock(dn_mutex); - serialize = 0; + serialize--; lck_mtx_unlock(dn_mutex); } } @@ -1140,43 +1109,37 @@ dummynet_send(struct mbuf *m) n = m->m_nextpkt; m->m_nextpkt = NULL; pkt = dn_tag_get(m); - + + DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n", + (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir, + pkt->dn_flags)); + switch (pkt->dn_dir) { case DN_TO_IP_OUT: { - struct route tmp_rt = pkt->ro; - (void)ip_output(m, NULL, &tmp_rt, pkt->flags, NULL, NULL); - if (tmp_rt.ro_rt) { - rtfree(tmp_rt.ro_rt); - tmp_rt.ro_rt = NULL; - } + struct route tmp_rt; + + /* route is already in the packet's dn_ro */ + bzero(&tmp_rt, sizeof (tmp_rt)); + + /* Force IP_RAWOUTPUT as the IP header is fully formed */ + pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING; + (void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL); + ROUTE_RELEASE(&tmp_rt); break ; } case DN_TO_IP_IN : proto_inject(PF_INET, m); break ; - -#if BRIDGE - case DN_TO_BDG_FWD : - /* - * The bridge requires/assumes the Ethernet header is - * contiguous in the first mbuf header. Insure this is true. - */ - if (BDG_LOADED) { - if (m->m_len < ETHER_HDR_LEN && - (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) { - printf("dummynet/bridge: pullup fail, dropping pkt\n"); - break; - } - m = bdg_forward_ptr(m, pkt->ifp); - } else { - /* somebody unloaded the bridge module. Drop pkt */ - /* XXX rate limit */ - printf("dummynet: dropping bridged packet trapped in pipe\n"); - } - if (m) - m_freem(m); +#ifdef INET6 + case DN_TO_IP6_OUT: { + /* routes already in the packet's dn_{ro6,pmtu} */ + ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL); break; -#endif + } + case DN_TO_IP6_IN: + proto_inject(PF_INET6, m); + break; +#endif /* INET6 */ default: printf("dummynet: bad switch %d!\n", pkt->dn_dir); m_freem(m); @@ -1185,51 +1148,6 @@ dummynet_send(struct mbuf *m) } } - - -/* - * called by an interface when tx_rdy occurs. 
- */ -int -if_tx_rdy(struct ifnet *ifp) -{ - struct dn_pipe *p; - struct mbuf *head = NULL, *tail = NULL; - int i; - - lck_mtx_lock(dn_mutex); - - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(p, &pipehash[i], next) - if (p->ifp == ifp) - break ; - if (p == NULL) { - char buf[32]; - snprintf(buf, sizeof(buf), "%s%d",ifp->if_name, ifp->if_unit); - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(p, &pipehash[i], next) - if (!strcmp(p->if_name, buf) ) { - p->ifp = ifp ; - DPRINTF(("dummynet: ++ tx rdy from %s (now found)\n", buf)); - break ; - } - } - if (p != NULL) { - DPRINTF(("dummynet: ++ tx rdy from %s%d - qlen %d\n", ifp->if_name, - ifp->if_unit, ifp->if_snd.ifq_len)); - p->numbytes = 0 ; /* mark ready for I/O */ - ready_event_wfq(p, &head, &tail); - } - lck_mtx_unlock(dn_mutex); - - - /* Send out the de-queued list of ready-to-send packets */ - if (head != NULL) - dummynet_send(head); - - return 0; -} - /* * Unconditionally expire empty queues in case of shortage. * Returns the number of queues freed. @@ -1241,6 +1159,7 @@ expire_queues(struct dn_flow_set *fs) int i, initial_elements = fs->rq_elements ; struct timeval timenow; + /* reviewed for getmicrotime usage */ getmicrotime(&timenow); if (fs->last_expired == timenow.tv_sec) @@ -1302,41 +1221,84 @@ create_queue(struct dn_flow_set *fs, int i) * so that further searches take less time. */ static struct dn_flow_queue * -find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id) +find_queue(struct dn_flow_set *fs, struct ip_flow_id *id) { int i = 0 ; /* we need i and q for new allocations */ struct dn_flow_queue *q, *prev; + int is_v6 = IS_IP6_FLOW_ID(id); if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) ) q = fs->rq[0] ; else { - /* first, do the masking */ - id->dst_ip &= fs->flow_mask.dst_ip ; - id->src_ip &= fs->flow_mask.src_ip ; + /* first, do the masking, then hash */ id->dst_port &= fs->flow_mask.dst_port ; id->src_port &= fs->flow_mask.src_port ; id->proto &= fs->flow_mask.proto ; id->flags = 0 ; /* we don't care about this one */ - /* then, hash function */ - i = ( (id->dst_ip) & 0xffff ) ^ - ( (id->dst_ip >> 15) & 0xffff ) ^ - ( (id->src_ip << 1) & 0xffff ) ^ - ( (id->src_ip >> 16 ) & 0xffff ) ^ - (id->dst_port << 1) ^ (id->src_port) ^ - (id->proto ); + if (is_v6) { + APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6); + APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6); + id->flow_id6 &= fs->flow_mask.flow_id6; + + i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff)^ + + ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff)^ + ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff)^ + + ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^ + + ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff)^ + ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff)^ + + (id->dst_port << 1) ^ (id->src_port) ^ + (id->proto ) ^ + (id->flow_id6); + } else { + id->dst_ip &= fs->flow_mask.dst_ip ; + id->src_ip &= fs->flow_mask.src_ip ; + + i = ( (id->dst_ip) & 0xffff ) ^ + ( 
(id->dst_ip >> 15) & 0xffff ) ^ + ( (id->src_ip << 1) & 0xffff ) ^ + ( (id->src_ip >> 16 ) & 0xffff ) ^ + (id->dst_port << 1) ^ (id->src_port) ^ + (id->proto ); + } i = i % fs->rq_size ; /* finally, scan the current list for a match */ searches++ ; for (prev=NULL, q = fs->rq[i] ; q ; ) { search_steps++; - if (id->dst_ip == q->id.dst_ip && - id->src_ip == q->id.src_ip && - id->dst_port == q->id.dst_port && - id->src_port == q->id.src_port && - id->proto == q->id.proto && - id->flags == q->id.flags) - break ; /* found */ - else if (pipe_expire && q->head == NULL && q->S == q->F+1 ) { + if (is_v6 && + IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) && + IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) && + id->dst_port == q->id.dst_port && + id->src_port == q->id.src_port && + id->proto == q->id.proto && + id->flags == q->id.flags && + id->flow_id6 == q->id.flow_id6) + break ; /* found */ + + if (!is_v6 && id->dst_ip == q->id.dst_ip && + id->src_ip == q->id.src_ip && + id->dst_port == q->id.dst_port && + id->src_port == q->id.src_port && + id->proto == q->id.proto && + id->flags == q->id.flags) + break ; /* found */ + + /* No match. Check if we can expire the entry */ + if (pipe_expire && q->head == NULL && q->S == q->F+1 ) { /* entry is idle and not in any heap, expire it */ struct dn_flow_queue *old_q = q ; @@ -1449,7 +1411,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len) if (fs->flags_fs & DN_QSIZE_IS_BYTES) p_b = (p_b * len) / fs->max_pkt_size; if (++q->count == 0) - q->random = MY_RANDOM & 0xffff; + q->random = (my_random() & 0xffff); else { /* * q->count counts packets arrived since last drop, so a greater @@ -1459,7 +1421,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len) q->count = 0; DPRINTF(("dummynet: - red drop")); /* after a drop we calculate a new random value */ - q->random = MY_RANDOM & 0xffff; + q->random = (my_random() & 0xffff); return 1; /* drop */ } } @@ -1475,7 +1437,7 @@ locate_flowset(int fs_nr) SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) if (fs->fs_nr == fs_nr) return fs ; - + return (NULL); } @@ -1510,40 +1472,53 @@ locate_pipe(int pipe_nr) * */ static int -dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) +dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int client) { - struct mbuf *head = NULL, *tail = NULL; + struct mbuf *head = NULL, *tail = NULL; struct dn_pkt_tag *pkt; struct m_tag *mtag; struct dn_flow_set *fs = NULL; struct dn_pipe *pipe ; u_int64_t len = m->m_pkthdr.len ; struct dn_flow_queue *q = NULL ; - int is_pipe; + int is_pipe = 0; struct timespec ts; struct timeval tv; - + + DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d client: %d\n", + (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir, client)); + +#if IPFIREWALL #if IPFW2 - ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs; + if (client == DN_CLIENT_IPFW) { + ipfw_insn *cmd = fwa->fwa_ipfw_rule->cmd + fwa->fwa_ipfw_rule->act_ofs; - if (cmd->opcode == O_LOG) - cmd += F_LEN(cmd); - is_pipe = (cmd->opcode == O_PIPE); + if (cmd->opcode == O_LOG) + cmd += F_LEN(cmd); + is_pipe = (cmd->opcode == O_PIPE); + } #else - is_pipe = (fwa->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE; + if (client == DN_CLIENT_IPFW) + is_pipe = (fwa->fwa_ipfw_rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE; #endif +#endif /* IPFIREWALL */ + +#if DUMMYNET + if (client == DN_CLIENT_PF) + is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 
1 : 0; +#endif /* DUMMYNET */ pipe_nr &= 0xffff ; lck_mtx_lock(dn_mutex); - /* make all time measurements in milliseconds (ms) - - * here we convert secs and usecs to msecs (just divide the + /* make all time measurements in milliseconds (ms) - + * here we convert secs and usecs to msecs (just divide the * usecs and take the closest whole number). */ - microuptime(&tv); + microuptime(&tv); curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000); - + /* * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule. */ @@ -1553,8 +1528,8 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) fs = &(pipe->fs); } else fs = locate_flowset(pipe_nr); - - + + if (fs == NULL){ goto dropit ; /* this queue/pipe does not exist! */ } @@ -1570,7 +1545,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) goto dropit ; } } - q = find_queue(fs, &(fwa->f_id)); + q = find_queue(fs, &(fwa->fwa_id)); if ( q == NULL ) goto dropit ; /* cannot allocate queue */ /* @@ -1578,7 +1553,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) */ q->tot_bytes += len ; q->tot_pkts++ ; - if ( fs->plr && (MY_RANDOM < fs->plr) ) + if ( fs->plr && (my_random() < fs->plr)) goto dropit ; /* random pkt drop */ if ( fs->flags_fs & DN_QSIZE_IS_BYTES) { if (q->len_bytes > fs->qsize) @@ -1591,8 +1566,8 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) goto dropit ; /* XXX expensive to zero, see if we can remove it*/ - mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, - sizeof(struct dn_pkt_tag), M_NOWAIT); + mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, + sizeof(struct dn_pkt_tag), M_NOWAIT, m); if ( mtag == NULL ) goto dropit ; /* cannot allocate packet header */ m_tag_prepend(m, mtag); /* attach to mbuf chain */ @@ -1601,28 +1576,66 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) bzero(pkt, sizeof(struct dn_pkt_tag)); /* ok, i can handle the pkt now... */ /* build and enqueue packet + parameters */ - pkt->rule = fwa->rule ; + /* + * PF is checked before ipfw so remember ipfw rule only when + * the caller is ipfw. When the caller is PF, fwa_ipfw_rule + * is a fake rule just used for convenience + */ + if (client == DN_CLIENT_IPFW) + pkt->dn_ipfw_rule = fwa->fwa_ipfw_rule; + pkt->dn_pf_rule = fwa->fwa_pf_rule; pkt->dn_dir = dir ; + pkt->dn_client = client; - pkt->ifp = fwa->oif; + pkt->dn_ifp = fwa->fwa_oif; if (dir == DN_TO_IP_OUT) { - /* - * We need to copy *ro because for ICMP pkts (and maybe others) - * the caller passed a pointer into the stack; dst might also be - * a pointer into *ro so it needs to be updated. - */ - pkt->ro = *(fwa->ro); - if (fwa->ro->ro_rt) - RT_ADDREF(fwa->ro->ro_rt); + /* + * We need to copy *ro because for ICMP pkts (and maybe others) + * the caller passed a pointer into the stack; dst might also be + * a pointer into *ro so it needs to be updated. 
+ */ + if (fwa->fwa_ro) { + route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof (pkt->dn_ro)); + } + if (fwa->fwa_dst) { + if (fwa->fwa_dst == (struct sockaddr_in *)&fwa->fwa_ro->ro_dst) /* dst points into ro */ + fwa->fwa_dst = (struct sockaddr_in *)&(pkt->dn_ro.ro_dst) ; - if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) /* dst points into ro */ - fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst) ; + bcopy (fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst)); + } + } else if (dir == DN_TO_IP6_OUT) { + if (fwa->fwa_ro6) { + route_copyout((struct route *)&pkt->dn_ro6, + (struct route *)fwa->fwa_ro6, sizeof (pkt->dn_ro6)); + } + if (fwa->fwa_ro6_pmtu) { + route_copyout((struct route *)&pkt->dn_ro6_pmtu, + (struct route *)fwa->fwa_ro6_pmtu, sizeof (pkt->dn_ro6_pmtu)); + } + if (fwa->fwa_dst6) { + if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) /* dst points into ro */ + fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst) ; - pkt->dn_dst = fwa->dst; - pkt->flags = fwa->flags; - if (fwa->ipoa != NULL) - pkt->ipoa = *(fwa->ipoa); - } + bcopy (fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6)); + } + pkt->dn_origifp = fwa->fwa_origifp; + pkt->dn_mtu = fwa->fwa_mtu; + pkt->dn_alwaysfrag = fwa->fwa_alwaysfrag; + pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen; + if (fwa->fwa_exthdrs) { + bcopy (fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs)); + /* + * Need to zero out the source structure so the mbufs + * won't be freed by ip6_output() + */ + bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs)); + } + } + if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) { + pkt->dn_flags = fwa->fwa_oflags; + if (fwa->fwa_ipoa != NULL) + pkt->dn_ipoa = *(fwa->fwa_ipoa); + } if (q->head == NULL) q->head = m; else @@ -1709,11 +1722,13 @@ done: ts.tv_nsec = 1 * 1000000; // 1ms timer_enabled = 1; bsd_timeout(dummynet, NULL, &ts); - } + } lck_mtx_unlock(dn_mutex); - if (head != NULL) + + if (head != NULL) { dummynet_send(head); + } return 0; @@ -1726,17 +1741,14 @@ dropit: } /* - * Below, the rtfree is only needed when (pkt->dn_dir == DN_TO_IP_OUT) + * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT) * Doing this would probably save us the initial bzero of dn_pkt */ #define DN_FREE_PKT(_m) do { \ struct m_tag *tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL); \ if (tag) { \ struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag+1); \ - if (n->ro.ro_rt != NULL) { \ - rtfree(n->ro.ro_rt); \ - n->ro.ro_rt = NULL; \ - } \ + ROUTE_RELEASE(&n->dn_ro); \ } \ m_tag_delete(_m, tag); \ m_freem(_m); \ @@ -1754,7 +1766,7 @@ purge_flow_set(struct dn_flow_set *fs, int all) struct dn_flow_queue *q, *qn ; int i ; - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); for (i = 0 ; i <= fs->rq_size ; i++ ) { for (q = fs->rq[i] ; q ; q = qn ) { @@ -1819,9 +1831,11 @@ dummynet_flush(void) lck_mtx_lock(dn_mutex); - /* remove all references to pipes ...*/ - flush_pipe_ptrs(NULL); - +#if IPFW2 + /* remove all references to pipes ...*/ + flush_pipe_ptrs(NULL); +#endif /* IPFW2 */ + /* Free heaps so we don't have unwanted events. 
*/ heap_free(&ready_heap); heap_free(&wfq_ready_heap); @@ -1847,9 +1861,8 @@ dummynet_flush(void) } -extern struct ip_fw *ip_fw_default_rule ; static void -dn_rule_delete_fs(struct dn_flow_set *fs, void *r) +dn_ipfw_rule_delete_fs(struct dn_flow_set *fs, void *r) { int i ; struct dn_flow_queue *q ; @@ -1859,8 +1872,8 @@ dn_rule_delete_fs(struct dn_flow_set *fs, void *r) for (q = fs->rq[i] ; q ; q = q->next ) for (m = q->head ; m ; m = m->m_nextpkt ) { struct dn_pkt_tag *pkt = dn_tag_get(m) ; - if (pkt->rule == r) - pkt->rule = ip_fw_default_rule ; + if (pkt->dn_ipfw_rule == r) + pkt->dn_ipfw_rule = &default_rule ; } } /* @@ -1868,7 +1881,7 @@ dn_rule_delete_fs(struct dn_flow_set *fs, void *r) * from packets matching this rule. */ void -dn_rule_delete(void *r) +dn_ipfw_rule_delete(void *r) { struct dn_pipe *p ; struct dn_flow_set *fs ; @@ -1885,16 +1898,16 @@ dn_rule_delete(void *r) */ for (i = 0; i < HASHSIZE; i++) SLIST_FOREACH(fs, &flowsethash[i], next) - dn_rule_delete_fs(fs, r); + dn_ipfw_rule_delete_fs(fs, r); for (i = 0; i < HASHSIZE; i++) SLIST_FOREACH(p, &pipehash[i], next) { fs = &(p->fs); - dn_rule_delete_fs(fs, r); + dn_ipfw_rule_delete_fs(fs, r); for (m = p->head ; m ; m = m->m_nextpkt ) { pkt = dn_tag_get(m); - if (pkt->rule == r) - pkt->rule = ip_fw_default_rule; + if (pkt->dn_ipfw_rule == r) + pkt->dn_ipfw_rule = &default_rule; } } lck_mtx_unlock(dn_mutex); @@ -2003,7 +2016,6 @@ set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src) /* * setup pipe or queue parameters. */ - static int config_pipe(struct dn_pipe *p) { @@ -2025,12 +2037,12 @@ config_pipe(struct dn_pipe *p) return EINVAL ; if (p->pipe_nr != 0) { /* this is a pipe */ struct dn_pipe *x, *b; - + struct dummynet_event dn_event; lck_mtx_lock(dn_mutex); /* locate pipe */ b = locate_pipe(p->pipe_nr); - + if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */ x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO) ; if (x == NULL) { @@ -2071,6 +2083,14 @@ config_pipe(struct dn_pipe *p) x, next); } lck_mtx_unlock(dn_mutex); + + bzero(&dn_event, sizeof(dn_event)); + dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG; + dn_event.dn_event_pipe_config.bandwidth = p->bandwidth; + dn_event.dn_event_pipe_config.delay = p->delay; + dn_event.dn_event_pipe_config.plr = pfs->plr; + + dummynet_event_enqueue_nwk_wq_entry(&dn_event); } else { /* config queue */ struct dn_flow_set *x, *b ; @@ -2170,7 +2190,7 @@ dummynet_drain(void) struct mbuf *m, *mnext; int i; - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); heap_free(&ready_heap); heap_free(&wfq_ready_heap); @@ -2182,7 +2202,7 @@ dummynet_drain(void) for (i = 0; i < HASHSIZE; i++) SLIST_FOREACH(p, &pipehash[i], next) { - purge_flow_set(&(p->fs), 0); + purge_flow_set(&(p->fs), 0); mnext = p->head; while ((m = mnext) != NULL) { @@ -2219,8 +2239,10 @@ delete_pipe(struct dn_pipe *p) /* Unlink from list of pipes. */ SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next); +#if IPFW2 /* remove references to this pipe from the ip_fw rules. */ flush_pipe_ptrs(&(b->fs)); +#endif /* IPFW2 */ /* Remove all references to this pipe from flow_sets. 
*/ for (i = 0; i < HASHSIZE; i++) @@ -2238,7 +2260,7 @@ delete_pipe(struct dn_pipe *p) pipe_remove_from_heap(&extract_heap, b); pipe_remove_from_heap(&wfq_ready_heap, b); lck_mtx_unlock(dn_mutex); - + FREE(b, M_DUMMYNET); } else { /* this is a WF2Q queue (dn_flow_set) */ struct dn_flow_set *b; @@ -2251,8 +2273,10 @@ delete_pipe(struct dn_pipe *p) return EINVAL ; /* not found */ } +#if IPFW2 /* remove references to this flow_set from the ip_fw rules. */ flush_pipe_ptrs(b); +#endif /* IPFW2 */ /* Unlink from list of flowsets. */ SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next); @@ -2275,23 +2299,25 @@ delete_pipe(struct dn_pipe *p) /* * helper function used to copy data from kernel in DUMMYNET_GET */ -static +static char* dn_copy_set_32(struct dn_flow_set *set, char *bp) { int i, copied = 0 ; struct dn_flow_queue *q; struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)bp; - - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); - + + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); + for (i = 0 ; i <= set->rq_size ; i++) for (q = set->rq[i] ; q ; q = q->next, qp++ ) { if (q->hash_slot != i) printf("dummynet: ++ at %d: wrong slot (have %d, " "should be %d)\n", copied, q->hash_slot, i); if (q->fs != set) - printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n", - i, q->fs, set); + printf("dummynet: ++ at %d: wrong fs ptr " + "(have 0x%llx, should be 0x%llx)\n", i, + (uint64_t)VM_KERNEL_ADDRPERM(q->fs), + (uint64_t)VM_KERNEL_ADDRPERM(set)); copied++ ; cp_queue_to_32_user( q, qp ); /* cleanup pointers */ @@ -2305,23 +2331,25 @@ char* dn_copy_set_32(struct dn_flow_set *set, char *bp) return (char *)qp ; } -static +static char* dn_copy_set_64(struct dn_flow_set *set, char *bp) { int i, copied = 0 ; struct dn_flow_queue *q; struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)bp; - - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); - + + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); + for (i = 0 ; i <= set->rq_size ; i++) for (q = set->rq[i] ; q ; q = q->next, qp++ ) { if (q->hash_slot != i) printf("dummynet: ++ at %d: wrong slot (have %d, " "should be %d)\n", copied, q->hash_slot, i); if (q->fs != set) - printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n", - i, q->fs, set); + printf("dummynet: ++ at %d: wrong fs ptr " + "(have 0x%llx, should be 0x%llx)\n", i, + (uint64_t)VM_KERNEL_ADDRPERM(q->fs), + (uint64_t)VM_KERNEL_ADDRPERM(set)); copied++ ; //bcopy(q, qp, sizeof(*q)); cp_queue_to_64_user( q, qp ); @@ -2347,7 +2375,7 @@ dn_calc_size(int is64user) size_t setsize; int i; - lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); if ( is64user ){ pipesize = sizeof(struct dn_pipe_64); queuesize = sizeof(struct dn_flow_queue_64); @@ -2375,70 +2403,74 @@ dn_calc_size(int is64user) static int dummynet_get(struct sockopt *sopt) { - char *buf, *bp=NULL; /* bp is the "copy-pointer" */ - size_t size ; - struct dn_flow_set *set ; - struct dn_pipe *p ; - int error=0, i ; - int is64user = 0; - - /* XXX lock held too long */ - lck_mtx_lock(dn_mutex); - /* - * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we - * cannot use this flag while holding a mutex. 
- */ + char *buf = NULL, *bp = NULL; /* bp is the "copy-pointer" */ + size_t size = 0; + struct dn_flow_set *set; + struct dn_pipe *p; + int error = 0, i; + int is64user = 0; + + /* XXX lock held too long */ + lck_mtx_lock(dn_mutex); + /* + * XXX: Ugly, but we need to allocate memory with M_WAITOK flag + * and we cannot use this flag while holding a mutex. + */ if (proc_is64bit(sopt->sopt_p)) is64user = 1; - for (i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) { size = dn_calc_size(is64user); lck_mtx_unlock(dn_mutex); - buf = _MALLOC(size, M_TEMP, M_WAITOK); + buf = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO); if (buf == NULL) - return ENOBUFS; + return(ENOBUFS); lck_mtx_lock(dn_mutex); if (size == dn_calc_size(is64user)) break; FREE(buf, M_TEMP); buf = NULL; - } - if (buf == NULL) { + } + if (buf == NULL) { lck_mtx_unlock(dn_mutex); - return ENOBUFS ; - } - + return(ENOBUFS); + } - bp = buf; - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(p, &pipehash[i], next) { - /* - * copy pipe descriptor into *bp, convert delay back to ms, - * then copy the flow_set descriptor(s) one at a time. - * After each flow_set, copy the queue descriptor it owns. - */ - if ( is64user ){ - bp = cp_pipe_to_64_user(p, (struct dn_pipe_64 *)bp); + bp = buf; + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(p, &pipehash[i], next) { + /* + * copy pipe descriptor into *bp, convert delay + * back to ms, then copy the flow_set descriptor(s) + * one at a time. After each flow_set, copy the + * queue descriptor it owns. + */ + if ( is64user ) { + bp = cp_pipe_to_64_user(p, + (struct dn_pipe_64 *)bp); + } else { + bp = cp_pipe_to_32_user(p, + (struct dn_pipe_32 *)bp); + } } - else{ - bp = cp_pipe_to_32_user(p, (struct dn_pipe_32 *)bp); + } + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(set, &flowsethash[i], next) { + struct dn_flow_set_64 *fs_bp = + (struct dn_flow_set_64 *)bp ; + cp_flow_set_to_64_user(set, fs_bp); + /* XXX same hack as above */ + fs_bp->next = CAST_DOWN(user64_addr_t, + DN_IS_QUEUE); + fs_bp->pipe = USER_ADDR_NULL; + fs_bp->rq = USER_ADDR_NULL ; + bp += sizeof(struct dn_flow_set_64); + bp = dn_copy_set_64( set, bp ); } - } - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(set, &flowsethash[i], next) { - struct dn_flow_set_64 *fs_bp = (struct dn_flow_set_64 *)bp ; - cp_flow_set_to_64_user(set, fs_bp); - /* XXX same hack as above */ - fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE); - fs_bp->pipe = USER_ADDR_NULL; - fs_bp->rq = USER_ADDR_NULL ; - bp += sizeof(struct dn_flow_set_64); - bp = dn_copy_set_64( set, bp ); - } - lck_mtx_unlock(dn_mutex); - - error = sooptcopyout(sopt, buf, size); - FREE(buf, M_TEMP); - return error ; + } + lck_mtx_unlock(dn_mutex); + error = sooptcopyout(sopt, buf, size); + FREE(buf, M_TEMP); + return(error); } /* @@ -2494,6 +2526,12 @@ ip_dn_ctl(struct sockopt *sopt) return error ; } +void +dummynet_init(void) +{ + eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt); +} + void ip_dn_init(void) { @@ -2501,21 +2539,64 @@ ip_dn_init(void) dn_mutex_grp_attr = lck_grp_attr_alloc_init(); dn_mutex_grp = lck_grp_alloc_init("dn", dn_mutex_grp_attr); dn_mutex_attr = lck_attr_alloc_init(); - - if ((dn_mutex = lck_mtx_alloc_init(dn_mutex_grp, dn_mutex_attr)) == NULL) { - printf("ip_dn_init: can't alloc dn_mutex\n"); - return; - } + lck_mtx_init(dn_mutex, dn_mutex_grp, dn_mutex_attr); ready_heap.size = ready_heap.elements = 0 ; - ready_heap.offset = 0 ; + ready_heap.offset = 0 ; + + wfq_ready_heap.size = wfq_ready_heap.elements = 0 ; + wfq_ready_heap.offset = 0 ; + + extract_heap.size 
= extract_heap.elements = 0 ; + extract_heap.offset = 0 ; + ip_dn_ctl_ptr = ip_dn_ctl; + ip_dn_io_ptr = dummynet_io; + + bzero(&default_rule, sizeof default_rule); +#if IPFIREWALL + default_rule.act_ofs = 0; + default_rule.rulenum = IPFW_DEFAULT_RULE; + default_rule.cmd_len = 1; + default_rule.set = RESVD_SET; + + default_rule.cmd[0].len = 1; + default_rule.cmd[0].opcode = +#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT + (1) ? O_ACCEPT : +#endif + O_DENY; +#endif +} + +struct dn_event_nwk_wq_entry +{ + struct nwk_wq_entry nwk_wqe; + struct dummynet_event dn_ev_arg; +}; + +static void +dummynet_event_callback(void *arg) +{ + struct dummynet_event *p_dn_ev = (struct dummynet_event *)arg; + + EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, p_dn_ev); + return; +} + +void +dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event) +{ + struct dn_event_nwk_wq_entry *p_dn_ev = NULL; + + MALLOC(p_dn_ev, struct dn_event_nwk_wq_entry *, + sizeof(struct dn_event_nwk_wq_entry), + M_NWKWQ, M_WAITOK | M_ZERO); - wfq_ready_heap.size = wfq_ready_heap.elements = 0 ; - wfq_ready_heap.offset = 0 ; + p_dn_ev->nwk_wqe.func = dummynet_event_callback; + p_dn_ev->nwk_wqe.is_arg_managed = TRUE; + p_dn_ev->nwk_wqe.arg = &p_dn_ev->dn_ev_arg; - extract_heap.size = extract_heap.elements = 0 ; - extract_heap.offset = 0 ; - ip_dn_ctl_ptr = ip_dn_ctl; - ip_dn_io_ptr = dummynet_io; - ip_dn_ruledel_ptr = dn_rule_delete; + bcopy(p_dn_event, &(p_dn_ev->dn_ev_arg), + sizeof(struct dummynet_event)); + nwk_wq_enqueue((struct nwk_wq_entry*)p_dn_ev); }
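
The hunks above replace the old MY_RANDOM macro with my_random(), which masks a read_frandom() value down to the 31-bit range that the packet-loss-rate check in dummynet_io() and the RED code expect. The user-space sketch below shows how that comparison behaves; my_random_sketch(), plr_drop(), and the assumption that plr was configured as loss_fraction * 0x7fffffff are illustrative, not part of the diff.

    #include <stdint.h>
    #include <stdlib.h>

    /* Stand-in for the kernel's read_frandom()-backed my_random():
     * any uniform 32-bit source masked to [0, 2^31 - 1] will do. */
    static uint32_t my_random_sketch(void)
    {
            return (uint32_t)random() & 0x7FFFFFFF;
    }

    /* Drop with probability roughly plr / 2^31, mirroring
     * "if ( fs->plr && (my_random() < fs->plr)) goto dropit;" */
    static int plr_drop(uint32_t plr)
    {
            return plr != 0 && my_random_sketch() < plr;
    }

With plr = 0x0147AE14 (about 1% of 0x7fffffff), roughly one packet in a hundred takes the dropit path.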
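
find_queue() buckets each masked flow id into fs->rq[]; the IPv4 folding shown in the hunk above can be lifted into a self-contained helper. The struct and function names below are illustrative, and the caller is assumed to have already applied the flow mask:

    #include <stdint.h>

    struct flow_id_v4 {
            uint32_t dst_ip, src_ip;        /* already masked with flow_mask */
            uint16_t dst_port, src_port;
            uint8_t  proto;
    };

    /* Same XOR folding as the IPv4 branch of find_queue(); the result is
     * then reduced modulo the flow set's bucket count (fs->rq_size). */
    static unsigned int
    dn_hash_v4(const struct flow_id_v4 *id, unsigned int rq_size)
    {
            unsigned int i = ((id->dst_ip) & 0xffff) ^
                ((id->dst_ip >> 15) & 0xffff) ^
                ((id->src_ip << 1) & 0xffff) ^
                ((id->src_ip >> 16) & 0xffff) ^
                ((unsigned int)id->dst_port << 1) ^ (id->src_port) ^
                (id->proto);
            return i % rq_size;
    }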
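
cp_pipe_to_32_user() and cp_pipe_to_64_user() keep the pipe delay in scheduler ticks internally and convert it back to milliseconds for user space with (delay * 1000) / (hz*10); per the comment above, the timer now fires every 1 ms, so with hz at 100 the scale factor works out to 1. A small worked sketch, where the ms-to-ticks direction is only the implied inverse (config_pipe()'s own conversion is not visible in these hunks):

    #define HZ 100                          /* kernel hz; dummynet scales it by 10 */

    /* ticks -> ms, exactly as the cp_pipe_to_*_user() copy-out does */
    static int dn_ticks_to_ms(int delay_ticks)
    {
            return (delay_ticks * 1000) / (HZ * 10);    /* identity while HZ == 100 */
    }

    /* ms -> ticks: the inverse implied by the export path (assumption) */
    static int dn_ms_to_ticks(int delay_ms)
    {
            return (delay_ms * (HZ * 10)) / 1000;
    }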
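
The scheduler heaps reset in ip_dn_init() (ready_heap, wfq_ready_heap, extract_heap) are array-backed binary min-heaps keyed by dn_key, as heap_extract() and the deleted heap_move() show. A minimal sift-down in the same style, assuming the conventional array layout behind HEAP_LEFT()/HEAP_FATHER() (left child at 2*i+1, parent at (i-1)/2) and the wrap-safe key compare used by the DN_KEY_* macros:

    #include <stdint.h>

    typedef uint64_t dn_key;
    /* wrap-safe "greater than", matching the DN_KEY_* comparison style (assumption) */
    #define KEY_GT(a, b)    ((int64_t)((a) - (b)) > 0)

    struct heap_entry { dn_key key; void *object; };

    /* Restore the min-heap property from slot i downward, mirroring the
     * "move down" half of the removed heap_move(). */
    static void
    sift_down(struct heap_entry *p, int elements, int i)
    {
            int maxelt = elements - 1, child;

            while ((child = 2 * i + 1) <= maxelt) {
                    if (child != maxelt && KEY_GT(p[child].key, p[child + 1].key))
                            child++;                /* pick the smaller child */
                    if (!KEY_GT(p[i].key, p[child].key))
                            break;                  /* heap property already holds */
                    struct heap_entry tmp = p[i];
                    p[i] = p[child];
                    p[child] = tmp;
                    i = child;
            }
    }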