X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..0a7de7458d150b5d4dffc935ba399be265ef0a1a:/bsd/netinet/ip_dummynet.c?ds=inline

diff --git a/bsd/netinet/ip_dummynet.c b/bsd/netinet/ip_dummynet.c
index 38338ae58..c9f566822 100644
--- a/bsd/netinet/ip_dummynet.c
+++ b/bsd/netinet/ip_dummynet.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- * 
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*
@@ -85,6 +85,7 @@
 #include
 #include	/* XXX */
 #include
+#include
 #include
 #include
 #include
@@ -95,6 +96,8 @@
 #if DUMMYNET
 #include
 #endif /* DUMMYNET */
+#include
+#include
 #include
 #include
 #include
@@ -116,7 +119,7 @@ static dn_key curr_time = 0 ; /* current simulation time */

 /* this is for the timer that fires to call dummynet() - we only enable the timer when
    there are packets to process, otherwise it's disabled */
-static int timer_enabled = 0; 
+static int timer_enabled = 0;

 static int dn_hash_size = 64 ;	/* default hash size */
@@ -155,7 +158,7 @@ static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
 static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
		struct mbuf **tail);

-/* 
+/*
 * Packets are retrieved from queues in Dummynet in chains instead of
 * packet-by-packet. The entire list of packets is first dequeued and
 * sent out by the following function.
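
The comment above describes dummynet's chain hand-off: a whole list of packets linked through m_nextpkt is detached under the lock, and dummynet_send() later walks and emits the list in one pass. A minimal user-space sketch of that walk; "struct pkt" and its "next" field are made-up analogues of struct mbuf and m_nextpkt, not XNU code:

/*
 * Sketch only: drain a chain of packets linked list-style, detaching
 * each element before handing it off, as dummynet_send() does with
 * m_nextpkt chains.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int id;
	struct pkt *next;		/* analogue of m_nextpkt */
};

static void
send_chain(struct pkt *head)
{
	struct pkt *p;

	while ((p = head) != NULL) {
		head = p->next;
		p->next = NULL;		/* detach before handing off */
		printf("sending packet %d\n", p->id);
		free(p);
	}
}
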
@@ -167,7 +170,6 @@ static void dummynet_send(struct mbuf *m);
 static struct dn_pipe_head	pipehash[HASHSIZE];	/* all pipes */
 static struct dn_flow_set_head	flowsethash[HASHSIZE];	/* all flowsets */

-
 #ifdef SYSCTL_NODE
 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
		CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
@@ -186,7 +188,7 @@ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
-	    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0, 
+	    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
	    "Max ratio between dynamic queues and buckets");
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
	    CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
@@ -207,13 +209,6 @@ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
 #define	DPRINTF(X)
 #endif

-/* contrary to the comment above random(), it does not actually
- * return a value [0, 2^31 - 1], which breaks plr amongst other
- * things. Masking it should work even if the behavior of
- * the function is fixed.
- */
-#define MY_RANDOM (random() & 0x7FFFFFFF)
-
 /* dummynet lock */
 static lck_grp_t	*dn_mutex_grp;
 static lck_grp_attr_t	*dn_mutex_grp_attr;
@@ -229,8 +224,6 @@ static void dummynet_flush(void);
 void dummynet_drain(void);
 static ip_dn_io_t dummynet_io;

-int if_tx_rdy(struct ifnet *ifp);
-
 static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
 static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
 static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
@@ -243,6 +236,16 @@ static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
 static char* dn_copy_set_32(struct dn_flow_set *set, char *bp);
 static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p );

+struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;
+
+uint32_t my_random(void)
+{
+	uint32_t val;
+	read_frandom(&val, sizeof(val));
+	val &= 0x7FFFFFFF;
+
+	return (val);
+}

 /*
  * Heap management functions.
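
The my_random() helper added above replaces the MY_RANDOM macro but keeps its 31-bit mask while switching the entropy source to read_frandom(): the packet-loss rate plr is stored scaled to 0x7FFFFFFF, so the sample must stay in [0, 2^31 - 1] for the later "my_random() < fs->plr" test to drop at the configured rate. A standalone user-space sketch of that relationship, with arc4random() (BSD/macOS <stdlib.h>) standing in for the kernel's read_frandom():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PLR_SCALE 0x7FFFFFFFu		/* plr = loss_rate * PLR_SCALE */

static uint32_t
sample_random(void)
{
	/* same mask as my_random(): keep the value in [0, 2^31 - 1] */
	return arc4random() & 0x7FFFFFFF;
}

int
main(void)
{
	uint32_t plr = (uint32_t)(0.01 * PLR_SCALE);	/* 1% loss */
	int drops = 0, trials = 1000000;

	for (int i = 0; i < trials; i++)
		if (sample_random() < plr)
			drops++;
	/* should print a value close to 0.0100 */
	printf("observed loss rate: %.4f\n", (double)drops / trials);
	return 0;
}
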
@@ -268,7 +271,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
 {
	struct dn_pipe_32 user_pipe_32;
	int error=0;
-	
+
	error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
	if ( !error ){
		p->pipe_nr = user_pipe_32.pipe_nr;
@@ -280,7 +283,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
		p->sched_time = user_pipe_32.sched_time;
		bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
		p->ready = user_pipe_32.ready;
-		
+
		p->fs.fs_nr = user_pipe_32.fs.fs_nr;
		p->fs.flags_fs = user_pipe_32.fs.flags_fs;
		p->fs.parent_nr = user_pipe_32.fs.parent_nr;
@@ -314,7 +317,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
 {
	struct dn_pipe_64 user_pipe_64;
	int error=0;
-	
+
	error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
	if ( !error ){
		p->pipe_nr = user_pipe_64.pipe_nr;
@@ -326,7 +329,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
		p->sched_time = user_pipe_64.sched_time;
		bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
		p->ready = user_pipe_64.ready;
-		
+
		p->fs.fs_nr = user_pipe_64.fs.fs_nr;
		p->fs.flags_fs = user_pipe_64.fs.flags_fs;
		p->fs.parent_nr = user_pipe_64.fs.parent_nr;
@@ -461,7 +464,7 @@ static
 char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
 {
	char	*bp;
-	
+
	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
@@ -478,10 +481,10 @@ char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
	pipe_bp->ready = p->ready;
-	
+
	cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));
-	
-	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; 
+
+	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
@@ -502,7 +505,7 @@ static
 char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
 {
	char	*bp;
-	
+
	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
@@ -519,10 +522,10 @@ char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
	pipe_bp->ready = p->ready;
-	
+
	cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));
-	
-	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; 
+
+	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
@@ -730,7 +733,7 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
	struct dn_pkt_tag *pkt = NULL;
	u_int64_t schedule_time;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);
	ASSERT(serialize >= 0);
	if (serialize == 0) {
		while ((m = pipe->head) != NULL) {
@@ -745,7 +748,7 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
			*head = m;
		*tail = m;
	}
-	
+
	if (*tail != NULL)
		(*tail)->m_nextpkt = NULL;
 }
@@ -768,11 +771,11 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */
-
-/* hz is 100, which gives a granularity of 10ms in the old timer. 
+
+/* hz is 100, which gives a granularity of 10ms in the old timer.
 * The timer has been changed to fire every 1ms, so the use of
 * hz has been modified here. All instances of hz have been left
- * in place but adjusted by a factor of 10 so that hz is functionally 
+ * in place but adjusted by a factor of 10 so that hz is functionally
 * equal to 1000.
 */
 #define SET_TICKS(_m, q, p) \
@@ -817,8 +820,8 @@ ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
	struct dn_pipe *p = q->fs->pipe ;
	int p_was_empty ;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);
+
	if (p == NULL) {
		printf("dummynet: ready_event pipe is gone\n");
		return ;
@@ -884,7 +887,7 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
	struct dn_heap *neh = &(p->not_eligible_heap) ;
	int64_t p_numbytes = p->numbytes;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);

	if (p->if_name[0] == 0) /* tx clock is simulated */
		p_numbytes += ( curr_time - p->sched_time ) * p->bandwidth;
@@ -985,7 +988,7 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
		 * queue on error hoping next time we are luckier.
		 */
	}
-	
+
	/* Fit (adjust if necessary) 64bit result into 32bit variable. */
	if (p_numbytes > INT_MAX)
		p->numbytes = INT_MAX;
@@ -1024,9 +1027,9 @@ dummynet(__unused void * unused)
	heaps[2] = &extract_heap ;		/* delay line */

	lck_mtx_lock(dn_mutex);
-	
-	/* make all time measurements in milliseconds (ms) - 
-	 * here we convert secs and usecs to msecs (just divide the 
+
+	/* make all time measurements in milliseconds (ms) -
+	 * here we convert secs and usecs to msecs (just divide the
	 * usecs and take the closest whole number).
	 */
	microuptime(&tv);
@@ -1066,8 +1069,8 @@ dummynet(__unused void * unused)
			pe->sum -= q->fs->weight ;
	}

-	/* check the heaps to see if there's still stuff in there, and 
-	 * only set the timer if there are packets to process 
+	/* check the heaps to see if there's still stuff in there, and
+	 * only set the timer if there are packets to process
	 */
	timer_enabled = 0;
	for (i=0; i < 3 ; i++) {
@@ -1080,10 +1083,10 @@ dummynet(__unused void * unused)
			break;
		}
	}
-	
+
	if (head != NULL)
		serialize++;
-	
+
	lck_mtx_unlock(dn_mutex);

	/* Send out the de-queued list of ready-to-send packets */
@@ -1136,7 +1139,7 @@ dummynet_send(struct mbuf *m)
		case DN_TO_IP6_IN:
			proto_inject(PF_INET6, m);
			break;
-#endif /* INET6 */	
+#endif /* INET6 */
		default:
			printf("dummynet: bad switch %d!\n", pkt->dn_dir);
			m_freem(m);
@@ -1145,58 +1148,6 @@ dummynet_send(struct mbuf *m)
			break ;
	}
 }

-
-
-/*
- * called by an interface when tx_rdy occurs.
- */
-int
-if_tx_rdy(struct ifnet *ifp)
-{
-	struct dn_pipe *p;
-	struct mbuf *head = NULL, *tail = NULL;
-	int i;
-
-	lck_mtx_lock(dn_mutex);
-
-	for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH(p, &pipehash[i], next)
-			if (p->ifp == ifp)
-				break ;
-	if (p == NULL) {
-		char buf[32];
-		snprintf(buf, sizeof(buf), "%s", if_name(ifp));
-		for (i = 0; i < HASHSIZE; i++)
-			SLIST_FOREACH(p, &pipehash[i], next)
-				if (!strcmp(p->if_name, buf) ) {
-					p->ifp = ifp ;
-					DPRINTF(("dummynet: ++ tx rdy from %s (now found)\n", buf));
-					break ;
-				}
-	}
-	if (p != NULL) {
-		DPRINTF(("dummynet: ++ tx rdy from %s - qlen %d\n", if_name(ifp),
-			IFCQ_LEN(&ifp->if_snd)));
-		p->numbytes = 0 ;	/* mark ready for I/O */
-		ready_event_wfq(p, &head, &tail);
-	}
-
-	if (head != NULL) {
-		serialize++;
-	}
-
-	lck_mtx_unlock(dn_mutex);
-
-	/* Send out the de-queued list of ready-to-send packets */
-	if (head != NULL) {
-		dummynet_send(head);
-		lck_mtx_lock(dn_mutex);
-		serialize--;
-		lck_mtx_unlock(dn_mutex);
-	}
-	return 0;
-}
-
 /*
  * Unconditionally expire empty queues in case of shortage.
  * Returns the number of queues freed.
@@ -1304,10 +1255,10 @@ find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
	    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^
	    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^

-	    ((id->src_ip6.__u6_addr.__u6_addr32[0] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[1] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff)^

	    (id->dst_port << 1) ^ (id->src_port) ^
	    (id->proto ) ^
@@ -1460,7 +1411,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
	if (fs->flags_fs & DN_QSIZE_IS_BYTES)
		p_b = (p_b * len) / fs->max_pkt_size;
	if (++q->count == 0)
-		q->random = MY_RANDOM & 0xffff;
+		q->random = (my_random() & 0xffff);
	else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
@@ -1470,7 +1421,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
			q->count = 0;
			DPRINTF(("dummynet: - red drop"));
			/* after a drop we calculate a new random value */
-			q->random = MY_RANDOM & 0xffff;
+			q->random = (my_random() & 0xffff);
			return 1;	/* drop */
		}
	}
@@ -1486,7 +1437,7 @@ locate_flowset(int fs_nr)
	SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
		if (fs->fs_nr == fs_nr)
			return fs ;
-	
+
	return (NULL);
 }
@@ -1561,13 +1512,13 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl

	lck_mtx_lock(dn_mutex);

-	/* make all time measurements in milliseconds (ms) - 
-	 * here we convert secs and usecs to msecs (just divide the 
+	/* make all time measurements in milliseconds (ms) -
+	 * here we convert secs and usecs to msecs (just divide the
	 * usecs and take the closest whole number).
	 */
	microuptime(&tv);
	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
-	
+
	/*
	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
	 */
@@ -1577,8 +1528,8 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
		fs = &(pipe->fs);
	} else
		fs = locate_flowset(pipe_nr);
-	
-	
+
+
	if (fs == NULL){
		goto dropit ;	/* this queue/pipe does not exist! */
	}
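
The find_queue() change above fixes a real mixing bug rather than style: for any 32-bit word, (w << 16) & 0xffff is identically zero, so the old expression never folded the upper halfword of each IPv6 source-address word into the bucket index; shifting right actually contributes those bits. A two-line user-space demonstration, with an arbitrary made-up sample word:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t w = 0xABCD1234;	/* stands in for one __u6_addr32 word */

	printf("old: 0x%04x\n", (w << 16) & 0xffff);	/* always 0x0000 */
	printf("new: 0x%04x\n", (w >> 16) & 0xffff);	/* 0xabcd */
	return 0;
}
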
@@ -1602,7 +1553,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
	 */
	q->tot_bytes += len ;
	q->tot_pkts++ ;
-	if ( fs->plr && (MY_RANDOM < fs->plr) )
+	if ( fs->plr && (my_random() < fs->plr))
		goto dropit ;		/* random pkt drop */
	if ( fs->flags_fs & DN_QSIZE_IS_BYTES) {
		if (q->len_bytes > fs->qsize)
@@ -1649,7 +1600,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
		if (fwa->fwa_dst) {
			if (fwa->fwa_dst == (struct sockaddr_in *)&fwa->fwa_ro->ro_dst) /* dst points into ro */
				fwa->fwa_dst = (struct sockaddr_in *)&(pkt->dn_ro.ro_dst) ;
-	
+
			bcopy (fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
		}
	} else if (dir == DN_TO_IP6_OUT) {
@@ -1664,7 +1615,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
		if (fwa->fwa_dst6) {
			if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) /* dst points into ro */
				fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst) ;
-	
+
			bcopy (fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
		}
		pkt->dn_origifp = fwa->fwa_origifp;
@@ -1673,10 +1624,10 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
		pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
		if (fwa->fwa_exthdrs) {
			bcopy (fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
-			/* 
+			/*
			 * Need to zero out the source structure so the mbufs
			 * won't be freed by ip6_output()
-			 */ 
+			 */
			bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
		}
	}
@@ -1774,7 +1725,7 @@ done:
	}

	lck_mtx_unlock(dn_mutex);
-	
+
	if (head != NULL) {
		dummynet_send(head);
	}
@@ -1815,7 +1766,7 @@ purge_flow_set(struct dn_flow_set *fs, int all)
	struct dn_flow_queue *q, *qn ;
	int i ;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);

	for (i = 0 ; i <= fs->rq_size ; i++ ) {
		for (q = fs->rq[i] ; q ; q = qn ) {
@@ -2065,7 +2016,6 @@ set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
 /*
  * setup pipe or queue parameters.
 */
-
 static int
 config_pipe(struct dn_pipe *p)
 {
@@ -2087,12 +2037,12 @@ config_pipe(struct dn_pipe *p)
		return EINVAL ;
	if (p->pipe_nr != 0) { /* this is a pipe */
		struct dn_pipe *x, *b;
-
+		struct dummynet_event dn_event;
		lck_mtx_lock(dn_mutex);
		/* locate pipe */
		b = locate_pipe(p->pipe_nr);
-		
+
		if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
			x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO) ;
			if (x == NULL) {
@@ -2133,6 +2083,14 @@ config_pipe(struct dn_pipe *p)
				x, next);
		}
		lck_mtx_unlock(dn_mutex);
+
+		bzero(&dn_event, sizeof(dn_event));
+		dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
+		dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
+		dn_event.dn_event_pipe_config.delay = p->delay;
+		dn_event.dn_event_pipe_config.plr = pfs->plr;
+
+		dummynet_event_enqueue_nwk_wq_entry(&dn_event);
	} else { /* config queue */
		struct dn_flow_set *x, *b ;

@@ -2232,7 +2190,7 @@ dummynet_drain(void)
	struct mbuf *m, *mnext;
	int i;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);

	heap_free(&ready_heap);
	heap_free(&wfq_ready_heap);
@@ -2244,7 +2202,7 @@ dummynet_drain(void)

	for (i = 0; i < HASHSIZE; i++)
		SLIST_FOREACH(p, &pipehash[i], next) {
-			purge_flow_set(&(p->fs), 0); 
+			purge_flow_set(&(p->fs), 0);

			mnext = p->head;
			while ((m = mnext) != NULL) {
@@ -2302,7 +2260,7 @@ delete_pipe(struct dn_pipe *p)
		pipe_remove_from_heap(&extract_heap, b);
		pipe_remove_from_heap(&wfq_ready_heap, b);
		lck_mtx_unlock(dn_mutex);
-		
+
		FREE(b, M_DUMMYNET);
	} else { /* this is a WF2Q queue (dn_flow_set) */
		struct dn_flow_set *b;
@@ -2341,15 +2299,15 @@ delete_pipe(struct dn_pipe *p)
 /*
  * helper function used to copy data from kernel in DUMMYNET_GET
  */
-static 
+static
 char* dn_copy_set_32(struct dn_flow_set *set, char *bp)
 {
	int i, copied = 0 ;
	struct dn_flow_queue *q;
	struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)bp;
-	
-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);
+
	for (i = 0 ; i <= set->rq_size ; i++)
		for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
			if (q->hash_slot != i)
@@ -2373,15 +2331,15 @@ char* dn_copy_set_32(struct dn_flow_set *set, char *bp)
	return (char *)qp ;
 }

-static 
+static
 char* dn_copy_set_64(struct dn_flow_set *set, char *bp)
 {
	int i, copied = 0 ;
	struct dn_flow_queue *q;
	struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)bp;
-	
-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);
+
	for (i = 0 ; i <= set->rq_size ; i++)
		for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
			if (q->hash_slot != i)
@@ -2417,7 +2375,7 @@ dn_calc_size(int is64user)
	size_t setsize;
	int i;

-	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED);
	if ( is64user ){
		pipesize = sizeof(struct dn_pipe_64);
		queuesize = sizeof(struct dn_flow_queue_64);
@@ -2445,70 +2403,74 @@ dn_calc_size(int is64user)
 static int
 dummynet_get(struct sockopt *sopt)
 {
-    char *buf, *bp=NULL; /* bp is the "copy-pointer" */
-    size_t size ;
-    struct dn_flow_set *set ;
-    struct dn_pipe *p ;
-    int error=0, i ;
-    int	is64user = 0;
-
-    /* XXX lock held too long */
-    lck_mtx_lock(dn_mutex);
-    /*
-     * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we
-     * cannot use this flag while holding a mutex.
-     */
+	char *buf = NULL, *bp = NULL; /* bp is the "copy-pointer" */
+	size_t size = 0;
+	struct dn_flow_set *set;
+	struct dn_pipe *p;
+	int error = 0, i;
+	int is64user = 0;
+
+	/* XXX lock held too long */
+	lck_mtx_lock(dn_mutex);
+	/*
+	 * XXX: Ugly, but we need to allocate memory with M_WAITOK flag
+	 * and we cannot use this flag while holding a mutex.
+	 */
	if (proc_is64bit(sopt->sopt_p))
		is64user = 1;
-    for (i = 0; i < 10; i++) {
+	for (i = 0; i < 10; i++) {
		size = dn_calc_size(is64user);
		lck_mtx_unlock(dn_mutex);
-		buf = _MALLOC(size, M_TEMP, M_WAITOK);
+		buf = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
		if (buf == NULL)
-			return ENOBUFS;
+			return(ENOBUFS);
		lck_mtx_lock(dn_mutex);
		if (size == dn_calc_size(is64user))
			break;
		FREE(buf, M_TEMP);
		buf = NULL;
-    }
-    if (buf == NULL) {
+	}
+	if (buf == NULL) {
		lck_mtx_unlock(dn_mutex);
-		return ENOBUFS ;
-    }
-
+		return(ENOBUFS);
+	}

-    bp = buf;
-    for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH(p, &pipehash[i], next) {
-			/*
-			 * copy pipe descriptor into *bp, convert delay back to ms,
-			 * then copy the flow_set descriptor(s) one at a time.
-			 * After each flow_set, copy the queue descriptor it owns.
-			 */
-			if ( is64user ){
-				bp = cp_pipe_to_64_user(p, (struct dn_pipe_64 *)bp);
+	bp = buf;
+	for (i = 0; i < HASHSIZE; i++) {
+		SLIST_FOREACH(p, &pipehash[i], next) {
+			/*
+			 * copy pipe descriptor into *bp, convert delay
+			 * back to ms, then copy the flow_set descriptor(s)
+			 * one at a time. After each flow_set, copy the
+			 * queue descriptor it owns.
+			 */
+			if ( is64user ) {
+				bp = cp_pipe_to_64_user(p,
+				    (struct dn_pipe_64 *)bp);
+			} else {
+				bp = cp_pipe_to_32_user(p,
+				    (struct dn_pipe_32 *)bp);
+			}
		}
-			else{
-				bp = cp_pipe_to_32_user(p, (struct dn_pipe_32 *)bp);
+	}
+	for (i = 0; i < HASHSIZE; i++) {
+		SLIST_FOREACH(set, &flowsethash[i], next) {
+			struct dn_flow_set_64 *fs_bp =
+			    (struct dn_flow_set_64 *)bp ;
+			cp_flow_set_to_64_user(set, fs_bp);
+			/* XXX same hack as above */
+			fs_bp->next = CAST_DOWN(user64_addr_t,
+			    DN_IS_QUEUE);
+			fs_bp->pipe = USER_ADDR_NULL;
+			fs_bp->rq = USER_ADDR_NULL ;
+			bp += sizeof(struct dn_flow_set_64);
+			bp = dn_copy_set_64( set, bp );
		}
-    }
-    for (i = 0; i < HASHSIZE; i++)
-		SLIST_FOREACH(set, &flowsethash[i], next) {
-			struct dn_flow_set_64 *fs_bp = (struct dn_flow_set_64 *)bp ;
-			cp_flow_set_to_64_user(set, fs_bp);
-			/* XXX same hack as above */
-			fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE);
-			fs_bp->pipe = USER_ADDR_NULL;
-			fs_bp->rq = USER_ADDR_NULL ;
-			bp += sizeof(struct dn_flow_set_64);
-			bp = dn_copy_set_64( set, bp );
-		}
-    lck_mtx_unlock(dn_mutex);
-
-    error = sooptcopyout(sopt, buf, size);
-    FREE(buf, M_TEMP);
-    return error ;
+	}
+	lck_mtx_unlock(dn_mutex);
+	error = sooptcopyout(sopt, buf, size);
+	FREE(buf, M_TEMP);
+	return(error);
 }

 /*
@@ -2564,6 +2526,12 @@ ip_dn_ctl(struct sockopt *sopt)
	return error ;
 }

+void
+dummynet_init(void)
+{
+	eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
+}
+
 void
 ip_dn_init(void)
 {
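
The dummynet_get() rewrite above keeps an unusual allocation dance: the snapshot's size depends on state guarded by dn_mutex, but an M_WAITOK allocation may sleep and so cannot run with the mutex held. The code therefore computes the size under the lock, drops the lock to allocate, relocks, and retries (up to ten times) if the size changed underneath it. A user-space sketch of that drop-lock/allocate/revalidate loop; pthread_mutex, calloc, and the entry count are stand-ins for dn_mutex, _MALLOC, and the pipe/flowset state (unlike the kernel code, which copies the snapshot before unlocking, the sketch just returns a right-sized buffer):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;
static size_t state_entries = 4;	/* hypothetical guarded state */

static size_t
current_size(void)			/* caller must hold state_mtx */
{
	return state_entries * 64;
}

/* Returns a buffer sized consistently with the guarded state, or NULL. */
void *
snapshot_alloc(size_t *sizep)
{
	void *buf = NULL;

	pthread_mutex_lock(&state_mtx);
	for (int i = 0; i < 10; i++) {
		size_t size = current_size();

		pthread_mutex_unlock(&state_mtx);
		buf = calloc(1, size);		/* may block: lock dropped */
		pthread_mutex_lock(&state_mtx);
		if (buf == NULL)
			break;
		if (size == current_size()) {	/* state unchanged? */
			pthread_mutex_unlock(&state_mtx);
			*sizep = size;
			return buf;
		}
		free(buf);			/* stale size: retry */
		buf = NULL;
	}
	pthread_mutex_unlock(&state_mtx);
	return NULL;
}
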
@@ -2585,16 +2553,50 @@ ip_dn_init(void)
	ip_dn_io_ptr = dummynet_io;

	bzero(&default_rule, sizeof default_rule);
-
+#if IPFIREWALL
	default_rule.act_ofs = 0;
	default_rule.rulenum = IPFW_DEFAULT_RULE;
	default_rule.cmd_len = 1;
	default_rule.set = RESVD_SET;
	default_rule.cmd[0].len = 1;
-	default_rule.cmd[0].opcode = 
+	default_rule.cmd[0].opcode =
 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
	(1) ? O_ACCEPT :
 #endif
-	O_DENY;
+	O_DENY;
+#endif
 }
+
+struct dn_event_nwk_wq_entry
+{
+	struct nwk_wq_entry nwk_wqe;
+	struct dummynet_event dn_ev_arg;
+};
+
+static void
+dummynet_event_callback(void *arg)
+{
+	struct dummynet_event *p_dn_ev = (struct dummynet_event *)arg;
+
+	EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, p_dn_ev);
+	return;
+}
+
+void
+dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
+{
+	struct dn_event_nwk_wq_entry *p_dn_ev = NULL;
+
+	MALLOC(p_dn_ev, struct dn_event_nwk_wq_entry *,
+	    sizeof(struct dn_event_nwk_wq_entry),
+	    M_NWKWQ, M_WAITOK | M_ZERO);
+
+	p_dn_ev->nwk_wqe.func = dummynet_event_callback;
+	p_dn_ev->nwk_wqe.is_arg_managed = TRUE;
+	p_dn_ev->nwk_wqe.arg = &p_dn_ev->dn_ev_arg;
+
+	bcopy(p_dn_event, &(p_dn_ev->dn_ev_arg),
+	    sizeof(struct dummynet_event));
+	nwk_wq_enqueue((struct nwk_wq_entry*)p_dn_ev);
+}
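
dummynet_event_enqueue_nwk_wq_entry() above copies the caller's event by value into a heap-allocated work-queue entry, so the callback can fire later from the network work-queue thread after the caller's stack copy of the event is gone. A single-threaded user-space sketch of that ownership hand-off; struct event, struct work_item, and the list head are hypothetical stand-ins for the nwk_wq machinery:

#include <stdlib.h>
#include <string.h>

struct event {
	int code;
	int arg;
};

struct work_item {
	void (*func)(void *);
	struct event ev;		/* owned copy, freed with the item */
	struct work_item *next;
};

static struct work_item *queue_head;

void
enqueue_event(const struct event *ev, void (*cb)(void *))
{
	struct work_item *w = calloc(1, sizeof(*w));

	if (w == NULL)
		return;			/* drop the event on alloc failure */
	w->func = cb;
	/* detach from the caller's stack, as the bcopy() above does */
	memcpy(&w->ev, ev, sizeof(w->ev));
	w->next = queue_head;		/* single-threaded sketch: no locking */
	queue_head = w;
}
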