X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..3903760236c30e3b5ace7a4eefac3a269d68957c:/bsd/netinet/ip_dummynet.c

diff --git a/bsd/netinet/ip_dummynet.c b/bsd/netinet/ip_dummynet.c
index 38338ae58..b40ca4612 100644
--- a/bsd/netinet/ip_dummynet.c
+++ b/bsd/netinet/ip_dummynet.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -116,7 +116,7 @@ static dn_key curr_time = 0 ; /* current simulation time */
 
 /* this is for the timer that fires to call dummynet() - we only enable
    the timer when there are packets to process, otherwise it's disabled */
-static int timer_enabled = 0; 
+static int timer_enabled = 0;
 
 static int dn_hash_size = 64 ;	/* default hash size */
@@ -155,7 +155,7 @@ static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
 static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
 		struct mbuf **tail);
 
-/* 
+/*
  * Packets are retrieved from queues in Dummynet in chains instead of
  * packet-by-packet. The entire list of packets is first dequeued and
  * sent out by the following function.
@@ -186,7 +186,7 @@ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
 	CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
-	CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0, 
+	CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
 	"Max ratio between dynamic queues and buckets");
 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
 	CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
@@ -268,7 +268,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
 {
 	struct dn_pipe_32	user_pipe_32;
 	int	error=0;
-	
+
 	error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
 	if ( !error ){
 		p->pipe_nr = user_pipe_32.pipe_nr;
@@ -280,7 +280,7 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
 		p->sched_time = user_pipe_32.sched_time;
 		bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
 		p->ready = user_pipe_32.ready;
-		
+
 		p->fs.fs_nr = user_pipe_32.fs.fs_nr;
 		p->fs.flags_fs = user_pipe_32.fs.flags_fs;
 		p->fs.parent_nr = user_pipe_32.fs.parent_nr;
@@ -314,7 +314,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
 {
 	struct dn_pipe_64	user_pipe_64;
 	int	error=0;
-	
+
 	error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
 	if ( !error ){
 		p->pipe_nr = user_pipe_64.pipe_nr;
@@ -326,7 +326,7 @@ int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
 		p->sched_time = user_pipe_64.sched_time;
 		bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
 		p->ready = user_pipe_64.ready;
-		
+
 		p->fs.fs_nr = user_pipe_64.fs.fs_nr;
 		p->fs.flags_fs = user_pipe_64.fs.flags_fs;
 		p->fs.parent_nr = user_pipe_64.fs.parent_nr;
@@ -461,7 +461,7 @@ static
 char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
 {
 	char	*bp;
-	
+
 	pipe_bp->pipe_nr = p->pipe_nr;
 	pipe_bp->bandwidth = p->bandwidth;
 	pipe_bp->delay = p->delay;
@@ -478,10 +478,10 @@ char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
 	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
 	pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
 	pipe_bp->ready = p->ready;
-	
+
 	cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));
-	
-	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; 
+
+	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
 	/*
 	 * XXX the following is a hack based on ->next being the
 	 * first field in dn_pipe and dn_flow_set. The correct
@@ -502,7 +502,7 @@ static
 char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
 {
 	char	*bp;
-	
+
 	pipe_bp->pipe_nr = p->pipe_nr;
 	pipe_bp->bandwidth = p->bandwidth;
 	pipe_bp->delay = p->delay;
@@ -519,10 +519,10 @@ char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
 	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
 	pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
 	pipe_bp->ready = p->ready;
-	
+
 	cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));
-	
-	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; 
+
+	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ;
 	/*
 	 * XXX the following is a hack based on ->next being the
 	 * first field in dn_pipe and dn_flow_set. The correct
@@ -745,7 +745,7 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
 			*head = m;
 		*tail = m;
 	}
-	
+
 	if (*tail != NULL)
 		(*tail)->m_nextpkt = NULL;
 }
@@ -768,11 +768,11 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
  * before being able to transmit a packet. The credit is taken from
  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
  */
- 
-/* hz is 100, which gives a granularity of 10ms in the old timer. 
+
+/* hz is 100, which gives a granularity of 10ms in the old timer.
  * The timer has been changed to fire every 1ms, so the use of
  * hz has been modified here. All instances of hz have been left
- * in place but adjusted by a factor of 10 so that hz is functionally 
+ * in place but adjusted by a factor of 10 so that hz is functionally
  * equal to 1000.
  */
 #define SET_TICKS(_m, q, p) \
@@ -818,7 +818,7 @@ ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
 	int p_was_empty ;
 
 	lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+
 	if (p == NULL) {
 		printf("dummynet: ready_event pipe is gone\n");
 		return ;
@@ -985,7 +985,7 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
 		 * queue on error hoping next time we are luckier.
 		 */
 	}
-	
+
 	/* Fit (adjust if necessary) 64bit result into 32bit variable. */
 	if (p_numbytes > INT_MAX)
 		p->numbytes = INT_MAX;
@@ -1024,9 +1024,9 @@ dummynet(__unused void * unused)
 	heaps[2] = &extract_heap ;		/* delay line */
 
 	lck_mtx_lock(dn_mutex);
-	
-	/* make all time measurements in milliseconds (ms) - 
-	 * here we convert secs and usecs to msecs (just divide the 
+
+	/* make all time measurements in milliseconds (ms) -
+	 * here we convert secs and usecs to msecs (just divide the
 	 * usecs and take the closest whole number). */
 	microuptime(&tv);
@@ -1066,8 +1066,8 @@ dummynet(__unused void * unused)
 			pe->sum -= q->fs->weight ;
 	}
 
-	/* check the heaps to see if there's still stuff in there, and 
-	 * only set the timer if there are packets to process 
+	/* check the heaps to see if there's still stuff in there, and
+	 * only set the timer if there are packets to process
 	 */
 	timer_enabled = 0;
 	for (i=0; i < 3 ; i++) {
@@ -1080,10 +1080,10 @@ dummynet(__unused void * unused)
 			break;
 		}
 	}
-	
+
 	if (head != NULL)
 		serialize++;
-	
+
 	lck_mtx_unlock(dn_mutex);
 
 	/* Send out the de-queued list of ready-to-send packets */
@@ -1136,7 +1136,7 @@ dummynet_send(struct mbuf *m)
 		case DN_TO_IP6_IN:
 			proto_inject(PF_INET6, m);
 			break;
-#endif /* INET6 */ 
+#endif /* INET6 */
 		default:
 			printf("dummynet: bad switch %d!\n", pkt->dn_dir);
 			m_freem(m);
@@ -1146,7 +1146,7 @@ dummynet_send(struct mbuf *m)
 	}
 }
 
-	
+
 /*
  * called by an interface when tx_rdy occurs.
  */
@@ -1156,9 +1156,9 @@ if_tx_rdy(struct ifnet *ifp)
 	struct dn_pipe *p;
 	struct mbuf *head = NULL, *tail = NULL;
 	int i;
-	
+
 	lck_mtx_lock(dn_mutex);
-	
+
 	for (i = 0; i < HASHSIZE; i++)
 		SLIST_FOREACH(p, &pipehash[i], next)
 			if (p->ifp == ifp)
@@ -1180,11 +1180,11 @@ if_tx_rdy(struct ifnet *ifp)
 		p->numbytes = 0 ; /* mark ready for I/O */
 		ready_event_wfq(p, &head, &tail);
 	}
-	
+
 	if (head != NULL) {
 		serialize++;
 	}
-	
+
 	lck_mtx_unlock(dn_mutex);
 
 	/* Send out the de-queued list of ready-to-send packets */
@@ -1304,10 +1304,10 @@ find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
 	    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^
 	    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^
 
-	    ((id->src_ip6.__u6_addr.__u6_addr32[0] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[1] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 16) & 0xffff)^
-	    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff)^
+	    ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff)^
 
 	    (id->dst_port << 1) ^ (id->src_port) ^
 	    (id->proto ) ^
@@ -1486,7 +1486,7 @@ locate_flowset(int fs_nr)
 	SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
 		if (fs->fs_nr == fs_nr)
 			return fs ;
-	
+
 	return (NULL);
 }
 
@@ -1561,13 +1561,13 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
 
 	lck_mtx_lock(dn_mutex);
 
-	/* make all time measurements in milliseconds (ms) - 
-	 * here we convert secs and usecs to msecs (just divide the 
+	/* make all time measurements in milliseconds (ms) -
+	 * here we convert secs and usecs to msecs (just divide the
 	 * usecs and take the closest whole number). */
 	microuptime(&tv);
 	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
-	
+
 	/*
 	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
 	 */
@@ -1577,8 +1577,8 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
 		fs = &(pipe->fs);
 	} else
 		fs = locate_flowset(pipe_nr);
-	
-	
+
+
 	if (fs == NULL){
 		goto dropit ;	/* this queue/pipe does not exist! */
 	}
@@ -1649,7 +1649,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
 		if (fwa->fwa_dst) {
 			if (fwa->fwa_dst == (struct sockaddr_in *)&fwa->fwa_ro->ro_dst) /* dst points into ro */
 				fwa->fwa_dst = (struct sockaddr_in *)&(pkt->dn_ro.ro_dst) ;
-			
+
 			bcopy (fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
 		}
 	} else if (dir == DN_TO_IP6_OUT) {
@@ -1664,7 +1664,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
 		if (fwa->fwa_dst6) {
 			if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) /* dst points into ro */
 				fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst) ;
-			
+
 			bcopy (fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
 		}
 		pkt->dn_origifp = fwa->fwa_origifp;
@@ -1673,10 +1673,10 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int cl
 		pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
 		if (fwa->fwa_exthdrs) {
 			bcopy (fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
-			/* 
+			/*
 			 * Need to zero out the source structure so the mbufs
 			 * won't be freed by ip6_output()
-			 */ 
+			 */
 			bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
 		}
 	}
@@ -1774,7 +1774,7 @@ done:
 	}
 
 	lck_mtx_unlock(dn_mutex);
-	
+
 	if (head != NULL) {
 		dummynet_send(head);
 	}
@@ -2092,7 +2092,7 @@ config_pipe(struct dn_pipe *p)
 
 		/* locate pipe */
 		b = locate_pipe(p->pipe_nr);
-		
+
 		if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
 			x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO) ;
 			if (x == NULL) {
@@ -2244,7 +2244,7 @@ dummynet_drain(void)
 
 	for (i = 0; i < HASHSIZE; i++)
 		SLIST_FOREACH(p, &pipehash[i], next) {
-		purge_flow_set(&(p->fs), 0); 
+		purge_flow_set(&(p->fs), 0);
 
 		mnext = p->head;
 		while ((m = mnext) != NULL) {
@@ -2302,7 +2302,7 @@ delete_pipe(struct dn_pipe *p)
 	pipe_remove_from_heap(&extract_heap, b);
 	pipe_remove_from_heap(&wfq_ready_heap, b);
 	lck_mtx_unlock(dn_mutex);
-	
+
 	FREE(b, M_DUMMYNET);
     } else { /* this is a WF2Q queue (dn_flow_set) */
 	struct dn_flow_set *b;
@@ -2341,15 +2341,15 @@ delete_pipe(struct dn_pipe *p)
 /*
  * helper function used to copy data from kernel in DUMMYNET_GET
  */
-static 
+static
 char* dn_copy_set_32(struct dn_flow_set *set, char *bp)
 {
     int i, copied = 0 ;
     struct dn_flow_queue *q;
    struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)bp;
-	
+
     lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+
     for (i = 0 ; i <= set->rq_size ; i++)
 	for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
 	    if (q->hash_slot != i)
@@ -2373,15 +2373,15 @@ char* dn_copy_set_32(struct dn_flow_set *set, char *bp)
     return (char *)qp ;
 }
 
-static 
+static
 char* dn_copy_set_64(struct dn_flow_set *set, char *bp)
 {
     int i, copied = 0 ;
     struct dn_flow_queue *q;
     struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)bp;
-	
+
     lck_mtx_assert(dn_mutex, LCK_MTX_ASSERT_OWNED);
-	
+
     for (i = 0 ; i <= set->rq_size ; i++)
 	for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
 	    if (q->hash_slot != i)
@@ -2585,16 +2585,17 @@ ip_dn_init(void)
 	ip_dn_io_ptr = dummynet_io;
 
 	bzero(&default_rule, sizeof default_rule);
-	
+#if IPFIREWALL
 	default_rule.act_ofs = 0;
 	default_rule.rulenum = IPFW_DEFAULT_RULE;
 	default_rule.cmd_len = 1;
 	default_rule.set = RESVD_SET;
 	default_rule.cmd[0].len = 1;
-	default_rule.cmd[0].opcode = 
+	default_rule.cmd[0].opcode =
 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
 				(1) ? O_ACCEPT :
 #endif
 				O_DENY;
+#endif
 }
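
Apart from wrapping the default_rule setup in ip_dn_init() in #if IPFIREWALL, most hunks above only strip trailing whitespace; the clearest functional change is in find_queue()'s IPv6 flow hash, where ((__u6_addr32[i] << 16) & 0xffff), which always evaluates to 0 for a 32-bit word, becomes ((__u6_addr32[i] >> 16) & 0xffff), so the upper 16 bits of each source-address word actually contribute to the bucket choice. A minimal stand-alone sketch of that folding (plain C, outside the kernel; fold_addr32 is an illustrative name, not a function in ip_dummynet.c):

#include <stdint.h>
#include <stdio.h>

/* Fold one 32-bit word of an IPv6 address into 16 bits, low halfword XOR
 * high halfword. With the old expression ((w << 16) & 0xffff) the
 * high-halfword term was always 0, so those bits never reached the hash. */
static uint16_t fold_addr32(uint32_t w)
{
	uint16_t high_old = (uint16_t)((w << 16) & 0xffff);	/* always 0 */
	uint16_t high_new = (uint16_t)((w >> 16) & 0xffff);	/* top 16 bits */
	(void)high_old;
	return (uint16_t)((w & 0xffff) ^ high_new);
}

int main(void)
{
	uint32_t w = 0x20010db8;	/* sample __u6_addr32[] word */

	printf("old term: 0x%04x\n", (unsigned)((w << 16) & 0xffff));	/* 0x0000 */
	printf("new term: 0x%04x\n", (unsigned)((w >> 16) & 0xffff));	/* 0x2001 */
	printf("folded  : 0x%04x\n", (unsigned)fold_addr32(w));		/* 0x2db9 */
	return 0;
}

In the kernel hash these folded terms are XORed together with the shifted address words, the ports, and the protocol before being reduced modulo the bucket count, so losing the high halfwords made dynamic queues cluster on fewer buckets than intended.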
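The timekeeping hunks change only whitespace, but the comments they touch describe how dummynet measures time: the timer now fires every 1 ms instead of once per hz tick (hz = 100, i.e. every 10 ms), so the code keeps hz in place but scales it by 10, and curr_time is kept in milliseconds derived from microuptime(). A small user-space sketch of the same arithmetic, assuming hz = 100 as the comment states and using gettimeofday() in place of the kernel-only microuptime():

#include <stdio.h>
#include <sys/time.h>

#define HZ 100	/* tick rate assumed by the diff's comment; hz*10 acts as 1000 */

int main(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);	/* stand-in for the kernel's microuptime() */

	/* curr_time in milliseconds, as dummynet() and dummynet_io() compute it */
	long long curr_time_ms = (long long)tv.tv_sec * 1000 + tv.tv_usec / 1000;

	/* delay reported back to userland by cp_pipe_to_*_user():
	 * (delay * 1000) / (hz*10) == delay when hz*10 is 1000 */
	int delay_ticks = 250;
	int delay_ms = (delay_ticks * 1000) / (HZ * 10);

	printf("curr_time = %lld ms, delay = %d ms\n", curr_time_ms, delay_ms);
	return 0;
}

With hz scaled to an effective 1000, the (delay * 1000) / (hz*10) conversion that the diff leaves in place is an identity, which is why only the surrounding whitespace changed.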