1 /*
2 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.18 2003/10/17 11:01:03 scottl Exp $
26 */
27
28 #define DEB(x)
29 #define DDB(x) x
30
31 /*
32 * Implement IP packet firewall (new version)
33 */
34
35 #ifndef INET
36 #error IPFIREWALL requires INET.
37 #endif /* INET */
38
39 #if IPFW2
40 #include <machine/spl.h>
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/kernel.h>
47 #include <sys/proc.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/syslog.h>
52 #include <sys/ucred.h>
53 #include <sys/kern_event.h>
54
55 #include <net/if.h>
56 #include <net/route.h>
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/in_var.h>
60 #include <netinet/in_pcb.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/ip_icmp.h>
64 #include <netinet/ip_fw.h>
65 #include <netinet/ip_divert.h>
66
67 #if DUMMYNET
68 #include <netinet/ip_dummynet.h>
69 #endif /* DUMMYNET */
70
71 #include <netinet/tcp.h>
72 #include <netinet/tcp_timer.h>
73 #include <netinet/tcp_var.h>
74 #include <netinet/tcpip.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77
78 #ifdef IPSEC
79 #include <netinet6/ipsec.h>
80 #endif
81
82 #include <netinet/if_ether.h> /* XXX for ETHERTYPE_IP */
83
84 #include "ip_fw2_compat.h"
85
86 #include <sys/kern_event.h>
87 #include <stdarg.h>
88
89 /*
90 #include <machine/in_cksum.h>
91 */ /* XXX for in_cksum */
92
93 /*
94 * XXX This one should go in sys/mbuf.h. It is used to prevent a
95 * firewall-generated packet from looping forever through the firewall.
96 */
97 #ifndef M_SKIP_FIREWALL
98 #define M_SKIP_FIREWALL 0x4000
99 #endif
100
101 /*
102 * set_disable contains one bit per set value (0..31).
103 * If the bit is set, all rules with the corresponding set
104 * are disabled. Set RESVD_SET(31) is reserved for the default rule
105 * and rules that are not deleted by the flush command,
106 * and CANNOT be disabled.
107 * Rules in set RESVD_SET can only be deleted explicitly.
108 */
109 static u_int32_t set_disable;
110
111 int fw_verbose;
112 static int verbose_limit;
113 extern int fw_bypass;
114
115 #define IPFW_DEFAULT_RULE 65535
116
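/* Rules whose reserved_1 field equals this value are treated as inactive and skipped by ipfw_chk(). */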
117 #define IPFW_RULE_INACTIVE 1
118
119 /*
120 * list of rules for layer 3
121 */
122 static struct ip_fw *layer3_chain;
123
124 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chains");
125
126 static int fw_debug = 0;
127 static int autoinc_step = 100; /* bounded to 1..1000 in add_rule() */
128
129 static void ipfw_kev_post_msg(u_int32_t );
130
131 #ifdef SYSCTL_NODE
132
133 static int ipfw_sysctl SYSCTL_HANDLER_ARGS;
134
135 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Firewall");
136 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable,
137 CTLTYPE_INT | CTLFLAG_RW,
138 &fw_enable, 0, ipfw_sysctl, "I", "Enable ipfw");
139 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLFLAG_RW,
140 &autoinc_step, 0, "Rule number autoincrement step");
141 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
142 CTLFLAG_RW,
143 &fw_one_pass, 0,
144 "Only do a single pass through ipfw when using dummynet(4)");
145 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug,
146 CTLFLAG_RW,
147 &fw_debug, 0, "Enable printing of debug ip_fw statements");
148 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose,
149 CTLFLAG_RW,
150 &fw_verbose, 0, "Log matches to ipfw rules");
151 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
152 &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
153
154 /*
155 * Description of dynamic rules.
156 *
157 * Dynamic rules are stored in lists accessed through a hash table
158 * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
159 * be modified through the sysctl variable dyn_buckets which is
160 * updated when the table becomes empty.
161 *
162 * XXX currently there is only one list, ipfw_dyn.
163 *
164 * When a packet is received, its address fields are first masked
165 * with the mask defined for the rule, then hashed, then matched
166 * against the entries in the corresponding list.
167 * Dynamic rules can be used for different purposes:
168 * + stateful rules;
169 * + enforcing limits on the number of sessions;
170 * + in-kernel NAT (not implemented yet)
171 *
172 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
173 * measured in seconds and depending on the flags.
174 *
175 * The total number of dynamic rules is stored in dyn_count.
176 * The max number of dynamic rules is dyn_max. When we reach
177 * the maximum, no new dynamic rules are created. This is
178 * done to avoid consuming too much memory, but also too much
179 * time when searching on each packet (ideally, we should try instead
180 * to put a limit on the length of the list on each bucket...).
181 *
182 * Each dynamic rule holds a pointer to the parent ipfw rule so
183 * we know what action to perform. Dynamic rules are removed when
184 * the parent rule is deleted. XXX we should make them survive.
185 *
186 * There are some limitations with dynamic rules -- we do not
187 * obey the 'randomized match', and we do not do multiple
188 * passes through the firewall. XXX check the latter!!!
189 */
190 static ipfw_dyn_rule **ipfw_dyn_v = NULL;
191 static u_int32_t dyn_buckets = 256; /* must be power of 2 */
192 static u_int32_t curr_dyn_buckets = 256; /* must be power of 2 */
193
194 /*
195 * Timeouts for various events in handling dynamic rules.
196 */
197 static u_int32_t dyn_ack_lifetime = 300;
198 static u_int32_t dyn_syn_lifetime = 20;
199 static u_int32_t dyn_fin_lifetime = 1;
200 static u_int32_t dyn_rst_lifetime = 1;
201 static u_int32_t dyn_udp_lifetime = 10;
202 static u_int32_t dyn_short_lifetime = 5;
203
204 /*
205 * Keepalives are sent if dyn_keepalive is set. They are sent every
206 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
207 * seconds of lifetime of a rule.
208 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
209 * than dyn_keepalive_period.
210 */
211
212 static u_int32_t dyn_keepalive_interval = 20;
213 static u_int32_t dyn_keepalive_period = 5;
214 static u_int32_t dyn_keepalive = 1; /* do send keepalives */
215
216 static u_int32_t static_count; /* # of static rules */
217 static u_int32_t static_len; /* size in bytes of static rules */
218 static u_int32_t dyn_count; /* # of dynamic rules */
219 static u_int32_t dyn_max = 4096; /* max # of dynamic rules */
220
221 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLFLAG_RW,
222 &dyn_buckets, 0, "Number of dyn. buckets");
223 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
224 &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
225 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
226 &dyn_count, 0, "Number of dyn. rules");
227 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
228 &dyn_max, 0, "Max number of dyn. rules");
229 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
230 &static_count, 0, "Number of static rules");
231 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
232 &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
233 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
234 &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
235 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_RW,
236 &dyn_fin_lifetime, 0, "Lifetime of dyn. rules for fin");
237 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_RW,
238 &dyn_rst_lifetime, 0, "Lifetime of dyn. rules for rst");
239 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
240 &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
241 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
242 &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
243 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
244 &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
245
246 static int
247 ipfw_sysctl SYSCTL_HANDLER_ARGS
248 {
249 #pragma unused(arg1, arg2)
250 int error;
251
252 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
253 if (error || !req->newptr)
254 return (error);
255
256 ipfw_kev_post_msg(KEV_IPFW_ENABLE);
257
258 return error;
259 }
260
261 #endif /* SYSCTL_NODE */
262
263
264 static ip_fw_chk_t ipfw_chk;
265
266 /* firewall lock */
267 lck_grp_t *ipfw_mutex_grp;
268 lck_grp_attr_t *ipfw_mutex_grp_attr;
269 lck_attr_t *ipfw_mutex_attr;
270 lck_mtx_t *ipfw_mutex;
271
272 extern void ipfwsyslog( int level, const char *format,...);
273
274 #if DUMMYNET
275 ip_dn_ruledel_t *ip_dn_ruledel_ptr = NULL; /* hook into dummynet */
276 #endif /* DUMMYNET */
277
278 #define KEV_LOG_SUBCLASS 10
279 #define IPFWLOGEVENT 0
280
281 #define ipfwstring "ipfw:"
282 static size_t ipfwstringlen;
283
284 #define dolog( a ) { \
285 if ( fw_verbose == 2 ) /* Apple logging, log to ipfw.log */ \
286 ipfwsyslog a ; \
287 else log a ; \
288 }
289
290 void ipfwsyslog( int level, const char *format,...)
291 {
292 #define msgsize 100
293
294 struct kev_msg ev_msg;
295 va_list ap;
296 char msgBuf[msgsize];
297 char *dptr = msgBuf;
298 unsigned char pri;
299 int loglen;
300
301 va_start( ap, format );
302 loglen = vsnprintf(msgBuf, msgsize, format, ap);
303 va_end( ap );
304
305 ev_msg.vendor_code = KEV_VENDOR_APPLE;
306 ev_msg.kev_class = KEV_NETWORK_CLASS;
307 ev_msg.kev_subclass = KEV_LOG_SUBCLASS;
308 ev_msg.event_code = IPFWLOGEVENT;
309
310 /* get rid of the trailing \n */
311 dptr[loglen-1] = 0;
312
313 pri = LOG_PRI(level);
314
315 /* remove "ipfw:" prefix if logging to ipfw log */
316 if ( !(strncmp( ipfwstring, msgBuf, ipfwstringlen))){
317 dptr = msgBuf+ipfwstringlen;
318 }
319
320 ev_msg.dv[0].data_ptr = &pri;
321 ev_msg.dv[0].data_length = 1;
322 ev_msg.dv[1].data_ptr = dptr;
323 ev_msg.dv[1].data_length = 100; /* bug in kern_post_msg, it can't handle size > 256-msghdr */
324 ev_msg.dv[2].data_length = 0;
325
326 kev_post_msg(&ev_msg);
327 }
328
329 /*
330 * This macro maps an ip pointer into a layer3 header pointer of type T
331 */
332 #define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
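/* ip_hl counts 32-bit words, so adding it to a u_int32_t pointer skips the whole IP header, options included. */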
333
334 static __inline int
335 icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd)
336 {
337 int type = L3HDR(struct icmp,ip)->icmp_type;
338
339 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
340 }
341
342 #define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
343 (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )
344
345 static int
346 is_icmp_query(struct ip *ip)
347 {
348 int type = L3HDR(struct icmp, ip)->icmp_type;
349 return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
350 }
351 #undef TT
352
353 /*
354 * The following checks use two arrays of 8 or 16 bits to store the
355 * bits that we want set or clear, respectively. They are in the
356 * low and high half of cmd->arg1 or cmd->d[0].
357 *
358 * We scan options and store the bits we find set. We succeed if
359 *
360 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
361 *
362 * The code is sometimes optimized not to store additional variables.
363 */
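/*
 * For example, a rule requiring "SYN set, ACK clear" would carry
 * arg1 = TH_SYN | (TH_ACK << 8): TH_SYN in the want-set (low) half,
 * TH_ACK in the want-clear (high) half.
 */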
364
365 static int
366 flags_match(ipfw_insn *cmd, u_int8_t bits)
367 {
368 u_char want_clear;
369 bits = ~bits;
370
371 if ( ((cmd->arg1 & 0xff) & bits) != 0)
372 return 0; /* some bits we want set were clear */
373 want_clear = (cmd->arg1 >> 8) & 0xff;
374 if ( (want_clear & bits) != want_clear)
375 return 0; /* some bits we want clear were set */
376 return 1;
377 }
378
379 static int
380 ipopts_match(struct ip *ip, ipfw_insn *cmd)
381 {
382 int optlen, bits = 0;
383 u_char *cp = (u_char *)(ip + 1);
384 int x = (ip->ip_hl << 2) - sizeof (struct ip);
385
386 for (; x > 0; x -= optlen, cp += optlen) {
387 int opt = cp[IPOPT_OPTVAL];
388
389 if (opt == IPOPT_EOL)
390 break;
391 if (opt == IPOPT_NOP)
392 optlen = 1;
393 else {
394 optlen = cp[IPOPT_OLEN];
395 if (optlen <= 0 || optlen > x)
396 return 0; /* invalid or truncated */
397 }
398 switch (opt) {
399
400 default:
401 break;
402
403 case IPOPT_LSRR:
404 bits |= IP_FW_IPOPT_LSRR;
405 break;
406
407 case IPOPT_SSRR:
408 bits |= IP_FW_IPOPT_SSRR;
409 break;
410
411 case IPOPT_RR:
412 bits |= IP_FW_IPOPT_RR;
413 break;
414
415 case IPOPT_TS:
416 bits |= IP_FW_IPOPT_TS;
417 break;
418 }
419 }
420 return (flags_match(cmd, bits));
421 }
422
423 static int
424 tcpopts_match(struct ip *ip, ipfw_insn *cmd)
425 {
426 int optlen, bits = 0;
427 struct tcphdr *tcp = L3HDR(struct tcphdr,ip);
428 u_char *cp = (u_char *)(tcp + 1);
429 int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
430
431 for (; x > 0; x -= optlen, cp += optlen) {
432 int opt = cp[0];
433 if (opt == TCPOPT_EOL)
434 break;
435 if (opt == TCPOPT_NOP)
436 optlen = 1;
437 else {
438 optlen = cp[1];
439 if (optlen <= 0)
440 break;
441 }
442
443 switch (opt) {
444
445 default:
446 break;
447
448 case TCPOPT_MAXSEG:
449 bits |= IP_FW_TCPOPT_MSS;
450 break;
451
452 case TCPOPT_WINDOW:
453 bits |= IP_FW_TCPOPT_WINDOW;
454 break;
455
456 case TCPOPT_SACK_PERMITTED:
457 case TCPOPT_SACK:
458 bits |= IP_FW_TCPOPT_SACK;
459 break;
460
461 case TCPOPT_TIMESTAMP:
462 bits |= IP_FW_TCPOPT_TS;
463 break;
464
465 case TCPOPT_CC:
466 case TCPOPT_CCNEW:
467 case TCPOPT_CCECHO:
468 bits |= IP_FW_TCPOPT_CC;
469 break;
470 }
471 }
472 return (flags_match(cmd, bits));
473 }
474
475 static int
476 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
477 {
478 if (ifp == NULL) /* no iface with this packet, match fails */
479 return 0;
480 /* Check by name or by IP address */
481 if (cmd->name[0] != '\0') { /* match by name */
482 /* Check unit number (-1 is wildcard) */
483 if (cmd->p.unit != -1 && cmd->p.unit != ifp->if_unit)
484 return(0);
485 /* Check name */
486 if (!strncmp(ifp->if_name, cmd->name, IFNAMSIZ))
487 return(1);
488 } else {
489 struct ifaddr *ia;
490
491 ifnet_lock_shared(ifp);
492 TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
493 if (ia->ifa_addr == NULL)
494 continue;
495 if (ia->ifa_addr->sa_family != AF_INET)
496 continue;
497 if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
498 (ia->ifa_addr))->sin_addr.s_addr) {
499 ifnet_lock_done(ifp);
500 return(1); /* match */
501 }
502 }
503 ifnet_lock_done(ifp);
504 }
505 return(0); /* no match, fail ... */
506 }
507
508 /*
509 * The 'verrevpath' option checks that the interface that an IP packet
510 * arrives on is the same interface that traffic destined for the
511 * packet's source address would be routed out of. This is a measure
512 * to block forged packets. This is also commonly known as "anti-spoofing"
513 * or Unicast Reverse Path Forwarding (Unicast RPF) in Cisco-ese. The
514 * name of the knob is purposely reminiscent of the Cisco IOS command,
515 *
516 * ip verify unicast reverse-path
517 *
518 * which implements the same functionality. But note that the syntax is
519 * misleading. The check may be performed on all IP packets whether unicast,
520 * multicast, or broadcast.
521 */
522 static int
523 verify_rev_path(struct in_addr src, struct ifnet *ifp)
524 {
525 static struct route ro;
526 struct sockaddr_in *dst;
527
528 dst = (struct sockaddr_in *)&(ro.ro_dst);
529
530 /* Check if we've cached the route from the previous call. */
531 if (src.s_addr != dst->sin_addr.s_addr) {
532 ro.ro_rt = NULL;
533
534 bzero(dst, sizeof(*dst));
535 dst->sin_family = AF_INET;
536 dst->sin_len = sizeof(*dst);
537 dst->sin_addr = src;
538
539 rtalloc_ign(&ro, RTF_CLONING|RTF_PRCLONING);
540 }
541
542 if ((ro.ro_rt == NULL) || (ifp == NULL) ||
543 (ro.ro_rt->rt_ifp->if_index != ifp->if_index))
544 return 0;
545
546 return 1;
547 }
548
549
550 static u_int64_t norule_counter; /* counter for ipfw_log(NULL...) */
551
552 #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
553 #define SNP(buf) buf, sizeof(buf)
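/*
 * SNPARGS expands to the (buffer, size) pair expected by snprintf() when
 * appending at offset 'len', clamping the remaining space to 0 if the
 * buffer is already full; SNP passes the whole buffer.
 */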
554
555 /*
556 * We enter here when we have a rule with O_LOG.
557 * XXX this function alone takes about 2Kbytes of code!
558 */
559 static void
560 ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh,
561 struct mbuf *m, struct ifnet *oif)
562 {
563 const char *action;
564 int limit_reached = 0;
565 char ipv4str[MAX_IPv4_STR_LEN];
566 char action2[40], proto[48], fragment[28];
567
568 fragment[0] = '\0';
569 proto[0] = '\0';
570
571 if (f == NULL) { /* bogus pkt */
572 if (verbose_limit != 0 && norule_counter >= verbose_limit)
573 return;
574 norule_counter++;
575 if (norule_counter == verbose_limit)
576 limit_reached = verbose_limit;
577 action = "Refuse";
578 } else { /* O_LOG is the first action, find the real one */
579 ipfw_insn *cmd = ACTION_PTR(f);
580 ipfw_insn_log *l = (ipfw_insn_log *)cmd;
581
582 if (l->max_log != 0 && l->log_left == 0)
583 return;
584 l->log_left--;
585 if (l->log_left == 0)
586 limit_reached = l->max_log;
587 cmd += F_LEN(cmd); /* point to first action */
588 if (cmd->opcode == O_PROB)
589 cmd += F_LEN(cmd);
590
591 action = action2;
592 switch (cmd->opcode) {
593 case O_DENY:
594 action = "Deny";
595 break;
596
597 case O_REJECT:
598 if (cmd->arg1==ICMP_REJECT_RST)
599 action = "Reset";
600 else if (cmd->arg1==ICMP_UNREACH_HOST)
601 action = "Reject";
602 else
603 snprintf(SNPARGS(action2, 0), "Unreach %d",
604 cmd->arg1);
605 break;
606
607 case O_ACCEPT:
608 action = "Accept";
609 break;
610 case O_COUNT:
611 action = "Count";
612 break;
613 case O_DIVERT:
614 snprintf(SNPARGS(action2, 0), "Divert %d",
615 cmd->arg1);
616 break;
617 case O_TEE:
618 snprintf(SNPARGS(action2, 0), "Tee %d",
619 cmd->arg1);
620 break;
621 case O_SKIPTO:
622 snprintf(SNPARGS(action2, 0), "SkipTo %d",
623 cmd->arg1);
624 break;
625 case O_PIPE:
626 snprintf(SNPARGS(action2, 0), "Pipe %d",
627 cmd->arg1);
628 break;
629 case O_QUEUE:
630 snprintf(SNPARGS(action2, 0), "Queue %d",
631 cmd->arg1);
632 break;
633 case O_FORWARD_IP: {
634 ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
635 int len;
636
637 if (f->reserved_1 == IPFW_RULE_INACTIVE) {
638 break;
639 }
640 len = snprintf(SNPARGS(action2, 0), "Forward to %s",
641 inet_ntop(AF_INET, &sa->sa.sin_addr, ipv4str, sizeof(ipv4str)));
642 if (sa->sa.sin_port)
643 snprintf(SNPARGS(action2, len), ":%d",
644 sa->sa.sin_port);
645 }
646 break;
647 default:
648 action = "UNKNOWN";
649 break;
650 }
651 }
652
653 if (hlen == 0) { /* non-ip */
654 snprintf(SNPARGS(proto, 0), "MAC");
655 } else {
656 struct ip *ip = mtod(m, struct ip *);
657 /* these three are all aliases to the same thing */
658 struct icmp *const icmp = L3HDR(struct icmp, ip);
659 struct tcphdr *const tcp = (struct tcphdr *)icmp;
660 struct udphdr *const udp = (struct udphdr *)icmp;
661
662 int ip_off, offset, ip_len;
663
664 int len;
665
666 if (eh != NULL) { /* layer 2 packets are as on the wire */
667 ip_off = ntohs(ip->ip_off);
668 ip_len = ntohs(ip->ip_len);
669 } else {
670 ip_off = ip->ip_off;
671 ip_len = ip->ip_len;
672 }
673 offset = ip_off & IP_OFFMASK;
674 switch (ip->ip_p) {
675 case IPPROTO_TCP:
676 len = snprintf(SNPARGS(proto, 0), "TCP %s",
677 inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
678 if (offset == 0)
679 snprintf(SNPARGS(proto, len), ":%d %s:%d",
680 ntohs(tcp->th_sport),
681 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)),
682 ntohs(tcp->th_dport));
683 else
684 snprintf(SNPARGS(proto, len), " %s",
685 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
686 break;
687
688 case IPPROTO_UDP:
689 len = snprintf(SNPARGS(proto, 0), "UDP %s",
690 inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
691 if (offset == 0)
692 snprintf(SNPARGS(proto, len), ":%d %s:%d",
693 ntohs(udp->uh_sport),
694 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)),
695 ntohs(udp->uh_dport));
696 else
697 snprintf(SNPARGS(proto, len), " %s",
698 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
699 break;
700
701 case IPPROTO_ICMP:
702 if (offset == 0)
703 len = snprintf(SNPARGS(proto, 0),
704 "ICMP:%u.%u ",
705 icmp->icmp_type, icmp->icmp_code);
706 else
707 len = snprintf(SNPARGS(proto, 0), "ICMP ");
708 len += snprintf(SNPARGS(proto, len), "%s",
709 inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
710 snprintf(SNPARGS(proto, len), " %s",
711 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
712 break;
713
714 default:
715 len = snprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p,
716 inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str)));
717 snprintf(SNPARGS(proto, len), " %s",
718 inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)));
719 break;
720 }
721
722 if (ip_off & (IP_MF | IP_OFFMASK))
723 snprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)",
724 ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
725 offset << 3,
726 (ip_off & IP_MF) ? "+" : "");
727 }
728 if (oif || m->m_pkthdr.rcvif)
729 {
730 dolog((LOG_AUTHPRIV | LOG_INFO,
731 "ipfw: %d %s %s %s via %s%d%s\n",
732 f ? f->rulenum : -1,
733 action, proto, oif ? "out" : "in",
734 oif ? oif->if_name : m->m_pkthdr.rcvif->if_name,
735 oif ? oif->if_unit : m->m_pkthdr.rcvif->if_unit,
736 fragment));
737 }
738 else{
739 dolog((LOG_AUTHPRIV | LOG_INFO,
740 "ipfw: %d %s %s [no if info]%s\n",
741 f ? f->rulenum : -1,
742 action, proto, fragment));
743 }
744 if (limit_reached){
745 dolog((LOG_AUTHPRIV | LOG_NOTICE,
746 "ipfw: limit %d reached on entry %d\n",
747 limit_reached, f ? f->rulenum : -1));
748 }
749 }
750
751 /*
752 * IMPORTANT: the hash function for dynamic rules must be commutative
753 * in source and destination (ip,port), because rules are bidirectional
754 * and we want to find both in the same bucket.
755 */
756 static __inline int
757 hash_packet(struct ipfw_flow_id *id)
758 {
759 u_int32_t i;
760
761 i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
762 i &= (curr_dyn_buckets - 1);
763 return i;
764 }
765
766 /**
767 * unlink a dynamic rule from a chain. prev is a pointer to
768 * the previous one, q is a pointer to the rule to delete,
769 * head is a pointer to the head of the queue.
770 * Modifies q and potentially also head.
771 */
772 #define UNLINK_DYN_RULE(prev, head, q) { \
773 ipfw_dyn_rule *old_q = q; \
774 \
775 /* remove a refcount to the parent */ \
776 if (q->dyn_type == O_LIMIT) \
777 q->parent->count--; \
778 DEB(printf("ipfw: unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",\
779 (q->id.src_ip), (q->id.src_port), \
780 (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); ) \
781 if (prev != NULL) \
782 prev->next = q = q->next; \
783 else \
784 head = q = q->next; \
785 dyn_count--; \
786 _FREE(old_q, M_IPFW); }
787
788 #define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0)
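/*
 * Signed comparison of the difference keeps TIME_LEQ correct across
 * tv_sec wraparound, as long as the two times are within 2^31 seconds
 * of each other.
 */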
789
790 /**
791 * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
792 *
793 * If keep_me == NULL, rules are deleted even if not expired,
794 * otherwise only expired rules are removed.
795 *
796 * The value of the second parameter is also used to identify
797 * a rule we absolutely do not want to remove (e.g. because we are
798 * holding a reference to it -- this is the case with O_LIMIT_PARENT
799 * rules). The pointer is only used for comparison, so any non-null
800 * value will do.
801 */
802 static void
803 remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
804 {
805 static u_int32_t last_remove = 0;
806
807 #define FORCE (keep_me == NULL)
808
809 ipfw_dyn_rule *prev, *q;
810 int i, pass = 0, max_pass = 0;
811 struct timeval timenow;
812
813 getmicrotime(&timenow);
814
815 if (ipfw_dyn_v == NULL || dyn_count == 0)
816 return;
817 /* do not expire more than once per second, it is useless */
818 if (!FORCE && last_remove == timenow.tv_sec)
819 return;
820 last_remove = timenow.tv_sec;
821
822 /*
823 * Because O_LIMIT rules refer to parent rules, the first pass only
824 * removes child rules and marks any pending LIMIT_PARENT rules;
825 * those are removed in a second pass.
826 */
827 next_pass:
828 for (i = 0 ; i < curr_dyn_buckets ; i++) {
829 for (prev=NULL, q = ipfw_dyn_v[i] ; q ; ) {
830 /*
831 * Logic can become complex here, so we split tests.
832 */
833 if (q == keep_me)
834 goto next;
835 if (rule != NULL && rule != q->rule)
836 goto next; /* not the one we are looking for */
837 if (q->dyn_type == O_LIMIT_PARENT) {
838 /*
839 * handle parent in the second pass,
840 * record we need one.
841 */
842 max_pass = 1;
843 if (pass == 0)
844 goto next;
845 if (FORCE && q->count != 0 ) {
846 /* XXX should not happen! */
847 printf("ipfw: OUCH! cannot remove rule,"
848 " count %d\n", q->count);
849 }
850 } else {
851 if (!FORCE &&
852 !TIME_LEQ( q->expire, timenow.tv_sec ))
853 goto next;
854 }
855 if (q->dyn_type != O_LIMIT_PARENT || !q->count) {
856 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
857 continue;
858 }
859 next:
860 prev=q;
861 q=q->next;
862 }
863 }
864 if (pass++ < max_pass)
865 goto next_pass;
866 }
867
868
869 /**
870 * lookup a dynamic rule.
871 */
872 static ipfw_dyn_rule *
873 lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
874 struct tcphdr *tcp)
875 {
876 /*
877 * stateful ipfw extensions.
878 * Lookup into dynamic session queue
879 */
880 #define MATCH_REVERSE 0
881 #define MATCH_FORWARD 1
882 #define MATCH_NONE 2
883 #define MATCH_UNKNOWN 3
884 #define BOTH_SYN (TH_SYN | (TH_SYN << 8))
885 #define BOTH_FIN (TH_FIN | (TH_FIN << 8))
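/*
 * q->state keeps the TCP flags seen in the forward direction in its low
 * byte and those seen in the reverse direction shifted left by 8 (see the
 * q->state update further down in this function), so BOTH_SYN/BOTH_FIN
 * test that a flag has been seen in both directions.
 */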
886
887 int i, dir = MATCH_NONE;
888 ipfw_dyn_rule *prev, *q=NULL;
889 struct timeval timenow;
890
891 getmicrotime(&timenow);
892
893 if (ipfw_dyn_v == NULL)
894 goto done; /* not found */
895 i = hash_packet( pkt );
896 for (prev=NULL, q = ipfw_dyn_v[i] ; q != NULL ; ) {
897 if (q->dyn_type == O_LIMIT_PARENT && q->count)
898 goto next;
899 if (TIME_LEQ( q->expire, timenow.tv_sec)) { /* expire entry */
900 int dounlink = 1;
901
902 /* check if entry is TCP */
903 if ( q->id.proto == IPPROTO_TCP )
904 {
905 /* do not delete an established TCP connection which hasn't been closed by both sides */
906 if ( (q->state & (BOTH_SYN | BOTH_FIN)) != (BOTH_SYN | BOTH_FIN) )
907 dounlink = 0;
908 }
909 if ( dounlink ){
910 UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
911 continue;
912 }
913 }
914 if (pkt->proto == q->id.proto &&
915 q->dyn_type != O_LIMIT_PARENT) {
916 if (pkt->src_ip == q->id.src_ip &&
917 pkt->dst_ip == q->id.dst_ip &&
918 pkt->src_port == q->id.src_port &&
919 pkt->dst_port == q->id.dst_port ) {
920 dir = MATCH_FORWARD;
921 break;
922 }
923 if (pkt->src_ip == q->id.dst_ip &&
924 pkt->dst_ip == q->id.src_ip &&
925 pkt->src_port == q->id.dst_port &&
926 pkt->dst_port == q->id.src_port ) {
927 dir = MATCH_REVERSE;
928 break;
929 }
930 }
931 next:
932 prev = q;
933 q = q->next;
934 }
935 if (q == NULL)
936 goto done; /* q = NULL, not found */
937
938 if ( prev != NULL) { /* found and not in front */
939 prev->next = q->next;
940 q->next = ipfw_dyn_v[i];
941 ipfw_dyn_v[i] = q;
942 }
943 if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
944 u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
945
946 q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
947 switch (q->state) {
948 case TH_SYN: /* opening */
949 q->expire = timenow.tv_sec + dyn_syn_lifetime;
950 break;
951
952 case BOTH_SYN: /* move to established */
953 case BOTH_SYN | TH_FIN : /* one side tries to close */
954 case BOTH_SYN | (TH_FIN << 8) :
955 if (tcp) {
956 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
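/* Sequence numbers compared modulo 2^32 via signed difference, same trick as TIME_LEQ. */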
957 u_int32_t ack = ntohl(tcp->th_ack);
958 if (dir == MATCH_FORWARD) {
959 if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd))
960 q->ack_fwd = ack;
961 else { /* ignore out-of-sequence */
962 break;
963 }
964 } else {
965 if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev))
966 q->ack_rev = ack;
967 else { /* ignore out-of-sequence */
968 break;
969 }
970 }
971 }
972 q->expire = timenow.tv_sec + dyn_ack_lifetime;
973 break;
974
975 case BOTH_SYN | BOTH_FIN: /* both sides closed */
976 if (dyn_fin_lifetime >= dyn_keepalive_period)
977 dyn_fin_lifetime = dyn_keepalive_period - 1;
978 q->expire = timenow.tv_sec + dyn_fin_lifetime;
979 break;
980
981 default:
982 #if 0
983 /*
984 * reset or some invalid combination, but can also
985 * occur if we use keep-state the wrong way.
986 */
987 if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
988 printf("invalid state: 0x%x\n", q->state);
989 #endif
990 if (dyn_rst_lifetime >= dyn_keepalive_period)
991 dyn_rst_lifetime = dyn_keepalive_period - 1;
992 q->expire = timenow.tv_sec + dyn_rst_lifetime;
993 break;
994 }
995 } else if (pkt->proto == IPPROTO_UDP) {
996 q->expire = timenow.tv_sec + dyn_udp_lifetime;
997 } else {
998 /* other protocols */
999 q->expire = timenow.tv_sec + dyn_short_lifetime;
1000 }
1001 done:
1002 if (match_direction)
1003 *match_direction = dir;
1004 return q;
1005 }
1006
1007 static void
1008 realloc_dynamic_table(void)
1009 {
1010 /*
1011 * Try reallocation, make sure we have a power of 2 and do
1012 * not allow more than 64k entries. In case of overflow,
1013 * default to 1024.
1014 */
1015
1016 if (dyn_buckets > 65536)
1017 dyn_buckets = 1024;
1018 if ((dyn_buckets & (dyn_buckets-1)) != 0) { /* not a power of 2 */
1019 dyn_buckets = curr_dyn_buckets; /* reset */
1020 return;
1021 }
1022 curr_dyn_buckets = dyn_buckets;
1023 if (ipfw_dyn_v != NULL)
1024 _FREE(ipfw_dyn_v, M_IPFW);
1025 for (;;) {
1026 ipfw_dyn_v = _MALLOC(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
1027 M_IPFW, M_NOWAIT | M_ZERO);
1028 if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
1029 break;
1030 curr_dyn_buckets /= 2;
1031 }
1032 }
1033
1034 /**
1035 * Install state of type 'dyn_type' for a dynamic session.
1036 * The hash table contains the following types of rules:
1037 * - regular rules (O_KEEP_STATE)
1038 * - rules enforcing a limited number of sessions per user
1039 * (O_LIMIT). When one is created, the parent's count is
1040 * increased by 1, and decreased on delete. In this case,
1041 * the third parameter is the parent rule and not the chain.
1042 * - "parent" rules for the above (O_LIMIT_PARENT).
1043 */
1044 static ipfw_dyn_rule *
1045 add_dyn_rule(struct ipfw_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule)
1046 {
1047 ipfw_dyn_rule *r;
1048 int i;
1049 struct timeval timenow;
1050
1051 getmicrotime(&timenow);
1052
1053 if (ipfw_dyn_v == NULL ||
1054 (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
1055 realloc_dynamic_table();
1056 if (ipfw_dyn_v == NULL)
1057 return NULL; /* failed ! */
1058 }
1059 i = hash_packet(id);
1060
1061 r = _MALLOC(sizeof *r, M_IPFW, M_NOWAIT | M_ZERO);
1062 if (r == NULL) {
1063 #if IPFW_DEBUG
1064 printf ("ipfw: sorry cannot allocate state\n");
1065 #endif
1066 return NULL;
1067 }
1068
1069 /* increase refcount on parent, and set pointer */
1070 if (dyn_type == O_LIMIT) {
1071 ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
1072 if ( parent->dyn_type != O_LIMIT_PARENT)
1073 panic("invalid parent");
1074 parent->count++;
1075 r->parent = parent;
1076 rule = parent->rule;
1077 }
1078
1079 r->id = *id;
1080 r->expire = timenow.tv_sec + dyn_syn_lifetime;
1081 r->rule = rule;
1082 r->dyn_type = dyn_type;
1083 r->pcnt = r->bcnt = 0;
1084 r->count = 0;
1085
1086 r->bucket = i;
1087 r->next = ipfw_dyn_v[i];
1088 ipfw_dyn_v[i] = r;
1089 dyn_count++;
1090 DEB(printf("ipfw: add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
1091 dyn_type,
1092 (r->id.src_ip), (r->id.src_port),
1093 (r->id.dst_ip), (r->id.dst_port),
1094 dyn_count ); )
1095 return r;
1096 }
1097
1098 /**
1099 * lookup dynamic parent rule using pkt and rule as search keys.
1100 * If the lookup fails, then install one.
1101 */
1102 static ipfw_dyn_rule *
1103 lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
1104 {
1105 ipfw_dyn_rule *q;
1106 int i;
1107 struct timeval timenow;
1108
1109 getmicrotime(&timenow);
1110
1111 if (ipfw_dyn_v) {
1112 i = hash_packet( pkt );
1113 for (q = ipfw_dyn_v[i] ; q != NULL ; q=q->next)
1114 if (q->dyn_type == O_LIMIT_PARENT &&
1115 rule== q->rule &&
1116 pkt->proto == q->id.proto &&
1117 pkt->src_ip == q->id.src_ip &&
1118 pkt->dst_ip == q->id.dst_ip &&
1119 pkt->src_port == q->id.src_port &&
1120 pkt->dst_port == q->id.dst_port) {
1121 q->expire = timenow.tv_sec + dyn_short_lifetime;
1122 DEB(printf("ipfw: lookup_dyn_parent found 0x%p\n",q);)
1123 return q;
1124 }
1125 }
1126 return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
1127 }
1128
1129 /**
1130 * Install dynamic state for rule type cmd->o.opcode
1131 *
1132 * Returns 1 (failure) if state is not installed because of errors or because
1133 * session limitations are enforced.
1134 */
1135 static int
1136 install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
1137 struct ip_fw_args *args)
1138 {
1139 static int last_log;
1140 struct timeval timenow;
1141
1142 ipfw_dyn_rule *q;
1143 getmicrotime(&timenow);
1144
1145 DEB(printf("ipfw: install state type %d 0x%08x %u -> 0x%08x %u\n",
1146 cmd->o.opcode,
1147 (args->f_id.src_ip), (args->f_id.src_port),
1148 (args->f_id.dst_ip), (args->f_id.dst_port) );)
1149
1150 q = lookup_dyn_rule(&args->f_id, NULL, NULL);
1151
1152 if (q != NULL) { /* should never occur */
1153 if (last_log != timenow.tv_sec) {
1154 last_log = timenow.tv_sec;
1155 printf("ipfw: install_state: entry already present, done\n");
1156 }
1157 return 0;
1158 }
1159
1160 if (dyn_count >= dyn_max)
1161 /*
1162 * Out of slots; try to remove any expired rule.
1163 */
1164 remove_dyn_rule(NULL, (ipfw_dyn_rule *)1);
1165
1166 if (dyn_count >= dyn_max) {
1167 if (last_log != timenow.tv_sec) {
1168 last_log = timenow.tv_sec;
1169 printf("ipfw: install_state: Too many dynamic rules\n");
1170 }
1171 return 1; /* cannot install, notify caller */
1172 }
1173
1174 switch (cmd->o.opcode) {
1175 case O_KEEP_STATE: /* bidir rule */
1176 add_dyn_rule(&args->f_id, O_KEEP_STATE, rule);
1177 break;
1178
1179 case O_LIMIT: /* limit number of sessions */
1180 {
1181 u_int16_t limit_mask = cmd->limit_mask;
1182 struct ipfw_flow_id id;
1183 ipfw_dyn_rule *parent;
1184
1185 DEB(printf("ipfw: installing dyn-limit rule %d\n",
1186 cmd->conn_limit);)
1187
1188 id.dst_ip = id.src_ip = 0;
1189 id.dst_port = id.src_port = 0;
1190 id.proto = args->f_id.proto;
1191
1192 if (limit_mask & DYN_SRC_ADDR)
1193 id.src_ip = args->f_id.src_ip;
1194 if (limit_mask & DYN_DST_ADDR)
1195 id.dst_ip = args->f_id.dst_ip;
1196 if (limit_mask & DYN_SRC_PORT)
1197 id.src_port = args->f_id.src_port;
1198 if (limit_mask & DYN_DST_PORT)
1199 id.dst_port = args->f_id.dst_port;
1200 parent = lookup_dyn_parent(&id, rule);
1201 if (parent == NULL) {
1202 printf("ipfw: add parent failed\n");
1203 return 1;
1204 }
1205 if (parent->count >= cmd->conn_limit) {
1206 /*
1207 * See if we can remove some expired rule.
1208 */
1209 remove_dyn_rule(rule, parent);
1210 if (parent->count >= cmd->conn_limit) {
1211 if (fw_verbose && last_log != timenow.tv_sec) {
1212 last_log = timenow.tv_sec;
1213 dolog((LOG_AUTHPRIV | LOG_DEBUG,
1214 "drop session, too many entries\n"));
1215 }
1216 return 1;
1217 }
1218 }
1219 add_dyn_rule(&args->f_id, O_LIMIT, (struct ip_fw *)parent);
1220 }
1221 break;
1222 default:
1223 printf("ipfw: unknown dynamic rule type %u\n", cmd->o.opcode);
1224 return 1;
1225 }
1226 lookup_dyn_rule(&args->f_id, NULL, NULL); /* XXX just set lifetime */
1227 return 0;
1228 }
1229
1230 /*
1231 * Transmit a TCP packet, containing either a RST or a keepalive.
1232 * When flags & TH_RST, we are sending a RST packet because a
1233 * "reset" action matched the packet. Otherwise we are sending a
1234 * keepalive; flags & TH_SYN selects the direction, forward if set.
1235 */
1236 static void
1237 send_pkt(struct ipfw_flow_id *id, u_int32_t seq, u_int32_t ack, int flags)
1238 {
1239 struct mbuf *m;
1240 struct ip *ip;
1241 struct tcphdr *tcp;
1242 struct route sro; /* fake route */
1243
1244 MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1245 if (m == 0)
1246 return;
1247 m->m_pkthdr.rcvif = (struct ifnet *)0;
1248 m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
1249 m->m_data += max_linkhdr;
1250
1251 ip = mtod(m, struct ip *);
1252 bzero(ip, m->m_len);
1253 tcp = (struct tcphdr *)(ip + 1); /* no IP options */
1254 ip->ip_p = IPPROTO_TCP;
1255 tcp->th_off = 5;
1256 /*
1257 * Assume we are sending a RST (or a keepalive in the reverse
1258 * direction), swap src and destination addresses and ports.
1259 */
1260 ip->ip_src.s_addr = htonl(id->dst_ip);
1261 ip->ip_dst.s_addr = htonl(id->src_ip);
1262 tcp->th_sport = htons(id->dst_port);
1263 tcp->th_dport = htons(id->src_port);
1264 if (flags & TH_RST) { /* we are sending a RST */
1265 if (flags & TH_ACK) {
1266 tcp->th_seq = htonl(ack);
1267 tcp->th_ack = htonl(0);
1268 tcp->th_flags = TH_RST;
1269 } else {
1270 if (flags & TH_SYN)
1271 seq++;
1272 tcp->th_seq = htonl(0);
1273 tcp->th_ack = htonl(seq);
1274 tcp->th_flags = TH_RST | TH_ACK;
1275 }
1276 } else {
1277 /*
1278 * We are sending a keepalive. flags & TH_SYN determines
1279 * the direction, forward if set, reverse if clear.
1280 * NOTE: seq and ack are always assumed to be correct
1281 * as set by the caller. This may be confusing...
1282 */
1283 if (flags & TH_SYN) {
1284 /*
1285 * we have to rewrite the correct addresses!
1286 */
1287 ip->ip_dst.s_addr = htonl(id->dst_ip);
1288 ip->ip_src.s_addr = htonl(id->src_ip);
1289 tcp->th_dport = htons(id->dst_port);
1290 tcp->th_sport = htons(id->src_port);
1291 }
1292 tcp->th_seq = htonl(seq);
1293 tcp->th_ack = htonl(ack);
1294 tcp->th_flags = TH_ACK;
1295 }
1296 /*
1297 * set ip_len to the payload size so we can compute
1298 * the tcp checksum on the pseudoheader
1299 * XXX check this, could save a couple of words ?
1300 */
1301 ip->ip_len = htons(sizeof(struct tcphdr));
1302 tcp->th_sum = in_cksum(m, m->m_pkthdr.len);
1303 /*
1304 * now fill fields left out earlier
1305 */
1306 ip->ip_ttl = ip_defttl;
1307 ip->ip_len = m->m_pkthdr.len;
1308 bzero (&sro, sizeof (sro));
1309 ip_rtaddr(ip->ip_dst, &sro);
1310 m->m_flags |= M_SKIP_FIREWALL;
1311 ip_output_list(m, 0, NULL, &sro, 0, NULL, NULL);
1312 if (sro.ro_rt) {
1313 RTFREE(sro.ro_rt);
1314 sro.ro_rt = NULL;
1315 }
1316 }
1317
1318 /*
1319 * sends a reject message, consuming the mbuf passed as an argument.
1320 */
1321 static void
1322 send_reject(struct ip_fw_args *args, int code, int offset, __unused int ip_len)
1323 {
1324
1325 if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
1326 /* We need the IP header in host order for icmp_error(). */
1327 if (args->eh != NULL) {
1328 struct ip *ip = mtod(args->m, struct ip *);
1329 ip->ip_len = ntohs(ip->ip_len);
1330 ip->ip_off = ntohs(ip->ip_off);
1331 }
1332 args->m->m_flags |= M_SKIP_FIREWALL;
1333 icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
1334 } else if (offset == 0 && args->f_id.proto == IPPROTO_TCP) {
1335 struct tcphdr *const tcp =
1336 L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1337 if ( (tcp->th_flags & TH_RST) == 0) {
1338 send_pkt(&(args->f_id), ntohl(tcp->th_seq),
1339 ntohl(tcp->th_ack),
1340 tcp->th_flags | TH_RST);
1341 }
1342 m_freem(args->m);
1343 } else
1344 m_freem(args->m);
1345 args->m = NULL;
1346 }
1347
1348 /**
1349 *
1350 * Given an ip_fw *, lookup_next_rule will return a pointer
1351 * to the next rule, which can be either the jump
1352 * target (for skipto instructions) or the next one in the list (in
1353 * all other cases including a missing jump target).
1354 * The result is also written in the "next_rule" field of the rule.
1355 * Backward jumps are not allowed, so start looking from the next
1356 * rule...
1357 *
1358 * This never returns NULL -- in case we do not have an exact match,
1359 * the next rule is returned. When the ruleset is changed,
1360 * pointers are flushed so we are always correct.
1361 */
1362
1363 static struct ip_fw *
1364 lookup_next_rule(struct ip_fw *me)
1365 {
1366 struct ip_fw *rule = NULL;
1367 ipfw_insn *cmd;
1368
1369 /* look for action, in case it is a skipto */
1370 cmd = ACTION_PTR(me);
1371 if (cmd->opcode == O_LOG)
1372 cmd += F_LEN(cmd);
1373 if ( cmd->opcode == O_SKIPTO )
1374 for (rule = me->next; rule ; rule = rule->next)
1375 if (rule->rulenum >= cmd->arg1)
1376 break;
1377 if (rule == NULL) /* failure or not a skipto */
1378 rule = me->next;
1379 me->next_rule = rule;
1380 return rule;
1381 }
1382
1383 /*
1384 * The main check routine for the firewall.
1385 *
1386 * All arguments are in args so we can modify them and return them
1387 * back to the caller.
1388 *
1389 * Parameters:
1390 *
1391 * args->m (in/out) The packet; we set to NULL when/if we nuke it.
1392 * Starts with the IP header.
1393 * args->eh (in) Mac header if present, or NULL for layer3 packet.
1394 * args->oif Outgoing interface, or NULL if packet is incoming.
1395 * The incoming interface is in the mbuf. (in)
1396 * args->divert_rule (in/out)
1397 * Skip up to the first rule past this rule number;
1398 * upon return, non-zero port number for divert or tee.
1399 *
1400 * args->rule Pointer to the last matching rule (in/out)
1401 * args->next_hop Socket we are forwarding to (out).
1402 * args->f_id Addresses grabbed from the packet (out)
1403 *
1404 * Return value:
1405 *
1406 * IP_FW_PORT_DENY_FLAG the packet must be dropped.
1407 * 0 The packet is to be accepted and routed normally OR
1408 * the packet was denied/rejected and has been dropped;
1409 * in the latter case, *m is equal to NULL upon return.
1410 * port Divert the packet to port, with these caveats:
1411 *
1412 * - If IP_FW_PORT_TEE_FLAG is set, tee the packet instead
1413 * of diverting it (ie, 'ipfw tee').
1414 *
1415 * - If IP_FW_PORT_DYNT_FLAG is set, interpret the lower
1416 * 16 bits as a dummynet pipe number instead of diverting it.
1417 */
1418
1419 static int
1420 ipfw_chk(struct ip_fw_args *args)
1421 {
1422 /*
1423 * Local variables hold state during the processing of a packet.
1424 *
1425 * IMPORTANT NOTE: to speed up the processing of rules, there
1426 * are some assumptions on the values of the variables, which
1427 * are documented here. Should you change them, please check
1428 * the implementation of the various instructions to make sure
1429 * that they still work.
1430 *
1431 * args->eh The MAC header. It is non-null for a layer-2
1432 * packet and NULL for a layer-3 packet.
1433 *
1434 * m | args->m Pointer to the mbuf, as received from the caller.
1435 * It may change if ipfw_chk() does an m_pullup, or if it
1436 * consumes the packet because it calls send_reject().
1437 * XXX This has to change, so that ipfw_chk() never modifies
1438 * or consumes the buffer.
1439 * ip is simply an alias of the value of m, and it is kept
1440 * in sync with it (the packet is supposed to start with
1441 * the ip header).
1442 */
1443 struct mbuf *m = args->m;
1444 struct ip *ip = mtod(m, struct ip *);
1445
1446 /*
1447 * oif | args->oif If NULL, ipfw_chk has been called on the
1448 * inbound path (ether_input, bdg_forward, ip_input).
1449 * If non-NULL, ipfw_chk has been called on the outbound path
1450 * (ether_output, ip_output).
1451 */
1452 struct ifnet *oif = args->oif;
1453
1454 struct ip_fw *f = NULL; /* matching rule */
1455 int retval = 0;
1456
1457 /*
1458 * hlen The length of the IPv4 header.
1459 * hlen >0 means we have an IPv4 packet.
1460 */
1461 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
1462
1463 /*
1464 * offset The offset of a fragment. offset != 0 means that
1465 * we have a fragment at this offset of an IPv4 packet.
1466 * offset == 0 means that (if this is an IPv4 packet)
1467 * this is the first or only fragment.
1468 */
1469 u_short offset = 0;
1470
1471 /*
1472 * Local copies of addresses. They are only valid if we have
1473 * an IP packet.
1474 *
1475 * proto The protocol. Set to 0 for non-ip packets,
1476 * or to the protocol read from the packet otherwise.
1477 * proto != 0 means that we have an IPv4 packet.
1478 *
1479 * src_port, dst_port port numbers, in HOST format. Only
1480 * valid for TCP and UDP packets.
1481 *
1482 * src_ip, dst_ip ip addresses, in NETWORK format.
1483 * Only valid for IPv4 packets.
1484 */
1485 u_int8_t proto;
1486 u_int16_t src_port = 0, dst_port = 0; /* NOTE: host format */
1487 struct in_addr src_ip = { 0 } , dst_ip = { 0 }; /* NOTE: network format */
1488 u_int16_t ip_len=0;
1489 int pktlen;
1490 int dyn_dir = MATCH_UNKNOWN;
1491 ipfw_dyn_rule *q = NULL;
1492 struct timeval timenow;
1493
1494 if (m->m_flags & M_SKIP_FIREWALL || fw_bypass) {
1495 return 0; /* accept */
1496 }
1497
1498 /*
1499 * Clear packet chain if we find one here.
1500 */
1501
1502 if (m->m_nextpkt != NULL) {
1503 m_freem_list(m->m_nextpkt);
1504 m->m_nextpkt = NULL;
1505 }
1506
1507 lck_mtx_lock(ipfw_mutex);
1508
1509 getmicrotime(&timenow);
1510 /*
1511 * dyn_dir = MATCH_UNKNOWN when rules unchecked,
1512 * MATCH_NONE when checked and not matched (q = NULL),
1513 * MATCH_FORWARD or MATCH_REVERSE otherwise (q != NULL)
1514 */
1515
1516 pktlen = m->m_pkthdr.len;
1517 if (args->eh == NULL || /* layer 3 packet */
1518 ( m->m_pkthdr.len >= sizeof(struct ip) &&
1519 ntohs(args->eh->ether_type) == ETHERTYPE_IP))
1520 hlen = ip->ip_hl << 2;
1521
1522 /*
1523 * Collect parameters into local variables for faster matching.
1524 */
1525 if (hlen == 0) { /* do not grab addresses for non-ip pkts */
1526 proto = args->f_id.proto = 0; /* mark f_id invalid */
1527 goto after_ip_checks;
1528 }
1529
1530 proto = args->f_id.proto = ip->ip_p;
1531 src_ip = ip->ip_src;
1532 dst_ip = ip->ip_dst;
1533 if (args->eh != NULL) { /* layer 2 packets are as on the wire */
1534 offset = ntohs(ip->ip_off) & IP_OFFMASK;
1535 ip_len = ntohs(ip->ip_len);
1536 } else {
1537 offset = ip->ip_off & IP_OFFMASK;
1538 ip_len = ip->ip_len;
1539 }
1540 pktlen = ip_len < pktlen ? ip_len : pktlen;
1541
1542 #define PULLUP_TO(len) \
1543 do { \
1544 if ((m)->m_len < (len)) { \
1545 args->m = m = m_pullup(m, (len)); \
1546 if (m == 0) \
1547 goto pullup_failed; \
1548 ip = mtod(m, struct ip *); \
1549 } \
1550 } while (0)
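/*
 * m_pullup() may return a different mbuf, or NULL on failure, so both
 * args->m and the local 'ip' alias are refreshed after every pull-up.
 */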
1551
1552 if (offset == 0) {
1553 switch (proto) {
1554 case IPPROTO_TCP:
1555 {
1556 struct tcphdr *tcp;
1557
1558 PULLUP_TO(hlen + sizeof(struct tcphdr));
1559 tcp = L3HDR(struct tcphdr, ip);
1560 dst_port = tcp->th_dport;
1561 src_port = tcp->th_sport;
1562 args->f_id.flags = tcp->th_flags;
1563 }
1564 break;
1565
1566 case IPPROTO_UDP:
1567 {
1568 struct udphdr *udp;
1569
1570 PULLUP_TO(hlen + sizeof(struct udphdr));
1571 udp = L3HDR(struct udphdr, ip);
1572 dst_port = udp->uh_dport;
1573 src_port = udp->uh_sport;
1574 }
1575 break;
1576
1577 case IPPROTO_ICMP:
1578 PULLUP_TO(hlen + 4); /* type, code and checksum. */
1579 args->f_id.flags = L3HDR(struct icmp, ip)->icmp_type;
1580 break;
1581
1582 default:
1583 break;
1584 }
1585 #undef PULLUP_TO
1586 }
1587
1588 args->f_id.src_ip = ntohl(src_ip.s_addr);
1589 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1590 args->f_id.src_port = src_port = ntohs(src_port);
1591 args->f_id.dst_port = dst_port = ntohs(dst_port);
1592
1593 after_ip_checks:
1594 if (args->rule) {
1595 /*
1596 * Packet has already been tagged. Look for the next rule
1597 * to restart processing.
1598 *
1599 * If fw_one_pass != 0 then just accept it.
1600 * XXX should not happen here, but optimized out in
1601 * the caller.
1602 */
1603 if (fw_one_pass) {
1604 lck_mtx_unlock(ipfw_mutex);
1605 return 0;
1606 }
1607
1608 f = args->rule->next_rule;
1609 if (f == NULL)
1610 f = lookup_next_rule(args->rule);
1611 } else {
1612 /*
1613 * Find the starting rule. It can be either the first
1614 * one, or the one after divert_rule if asked so.
1615 */
1616 int skipto = args->divert_rule;
1617
1618 f = layer3_chain;
1619 if (args->eh == NULL && skipto != 0) {
1620 if (skipto >= IPFW_DEFAULT_RULE) {
1621 lck_mtx_unlock(ipfw_mutex);
1622 return(IP_FW_PORT_DENY_FLAG); /* invalid */
1623 }
1624 while (f && f->rulenum <= skipto)
1625 f = f->next;
1626 if (f == NULL) { /* drop packet */
1627 lck_mtx_unlock(ipfw_mutex);
1628 return(IP_FW_PORT_DENY_FLAG);
1629 }
1630 }
1631 }
1632 args->divert_rule = 0; /* reset to avoid confusion later */
1633
1634 /*
1635 * Now scan the rules, and parse microinstructions for each rule.
1636 */
1637 for (; f; f = f->next) {
1638 int l, cmdlen;
1639 ipfw_insn *cmd;
1640 int skip_or; /* skip rest of OR block */
1641
1642 again:
1643 if (f->reserved_1 == IPFW_RULE_INACTIVE) {
1644 continue;
1645 }
1646
1647 if (set_disable & (1 << f->set) )
1648 continue;
1649
1650 skip_or = 0;
1651 for (l = f->cmd_len, cmd = f->cmd ; l > 0 ;
1652 l -= cmdlen, cmd += cmdlen) {
1653 int match;
1654
1655 /*
1656 * check_body is a jump target used when we find a
1657 * CHECK_STATE, and need to jump to the body of
1658 * the target rule.
1659 */
1660
1661 check_body:
1662 cmdlen = F_LEN(cmd);
1663 /*
1664 * An OR block (insn_1 || .. || insn_n) has the
1665 * F_OR bit set in all but the last instruction.
1666 * The first match will set "skip_or", and cause
1667 * the following instructions to be skipped until
1668 * past the one with the F_OR bit clear.
1669 */
1670 if (skip_or) { /* skip this instruction */
1671 if ((cmd->len & F_OR) == 0)
1672 skip_or = 0; /* next one is good */
1673 continue;
1674 }
1675 match = 0; /* set to 1 if we succeed */
1676
1677 switch (cmd->opcode) {
1678 /*
1679 * The first set of opcodes compares the packet's
1680 * fields with some pattern, setting 'match' if a
1681 * match is found. At the end of the loop there is
1682 * logic to deal with F_NOT and F_OR flags associated
1683 * with the opcode.
1684 */
1685 case O_NOP:
1686 match = 1;
1687 break;
1688
1689 case O_FORWARD_MAC:
1690 printf("ipfw: opcode %d unimplemented\n",
1691 cmd->opcode);
1692 break;
1693
1694 #ifndef __APPLE__
1695 case O_GID:
1696 #endif
1697 case O_UID:
1698 /*
1699 * We only check offset == 0 && proto != 0,
1700 * as this ensures that we have an IPv4
1701 * packet with the ports info.
1702 */
1703 if (offset!=0)
1704 break;
1705
1706 {
1707 struct inpcbinfo *pi;
1708 int wildcard;
1709 struct inpcb *pcb;
1710
1711 if (proto == IPPROTO_TCP) {
1712 wildcard = 0;
1713 pi = &tcbinfo;
1714 } else if (proto == IPPROTO_UDP) {
1715 wildcard = 1;
1716 pi = &udbinfo;
1717 } else
1718 break;
1719
1720 pcb = (oif) ?
1721 in_pcblookup_hash(pi,
1722 dst_ip, htons(dst_port),
1723 src_ip, htons(src_port),
1724 wildcard, oif) :
1725 in_pcblookup_hash(pi,
1726 src_ip, htons(src_port),
1727 dst_ip, htons(dst_port),
1728 wildcard, NULL);
1729
1730 if (pcb == NULL || pcb->inp_socket == NULL)
1731 break;
1732 #if __FreeBSD_version < 500034
1733 #define socheckuid(a,b) (kauth_cred_getuid((a)->so_cred) != (b))
1734 #endif
1735 if (cmd->opcode == O_UID) {
1736 match =
1737 #ifdef __APPLE__
1738 (pcb->inp_socket->so_uid == (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
1739 #else
1740 !socheckuid(pcb->inp_socket,
1741 (uid_t)((ipfw_insn_u32 *)cmd)->d[0]);
1742 #endif
1743 }
1744 #ifndef __APPLE__
1745 else {
1746 match = 0;
1747 kauth_cred_ismember_gid(pcb->inp_socket->so_cred,
1748 (gid_t)((ipfw_insn_u32 *)cmd)->d[0], &match);
1749 }
1750 #endif
1751 }
1752
1753 break;
1754
1755 case O_RECV:
1756 match = iface_match(m->m_pkthdr.rcvif,
1757 (ipfw_insn_if *)cmd);
1758 break;
1759
1760 case O_XMIT:
1761 match = iface_match(oif, (ipfw_insn_if *)cmd);
1762 break;
1763
1764 case O_VIA:
1765 match = iface_match(oif ? oif :
1766 m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
1767 break;
1768
1769 case O_MACADDR2:
1770 if (args->eh != NULL) { /* have MAC header */
1771 u_int32_t *want = (u_int32_t *)
1772 ((ipfw_insn_mac *)cmd)->addr;
1773 u_int32_t *mask = (u_int32_t *)
1774 ((ipfw_insn_mac *)cmd)->mask;
1775 u_int32_t *hdr = (u_int32_t *)args->eh;
1776
1777 match =
1778 ( want[0] == (hdr[0] & mask[0]) &&
1779 want[1] == (hdr[1] & mask[1]) &&
1780 want[2] == (hdr[2] & mask[2]) );
1781 }
1782 break;
1783
1784 case O_MAC_TYPE:
1785 if (args->eh != NULL) {
1786 u_int16_t t =
1787 ntohs(args->eh->ether_type);
1788 u_int16_t *p =
1789 ((ipfw_insn_u16 *)cmd)->ports;
1790 int i;
1791
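/* ports[] holds (low, high) pairs; match if the ethertype falls within any of them. */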
1792 for (i = cmdlen - 1; !match && i>0;
1793 i--, p += 2)
1794 match = (t>=p[0] && t<=p[1]);
1795 }
1796 break;
1797
1798 case O_FRAG:
1799 match = (hlen > 0 && offset != 0);
1800 break;
1801
1802 case O_IN: /* "out" is "not in" */
1803 match = (oif == NULL);
1804 break;
1805
1806 case O_LAYER2:
1807 match = (args->eh != NULL);
1808 break;
1809
1810 case O_PROTO:
1811 /*
1812 * We do not allow an arg of 0, so the
1813 * check of "proto" alone suffices.
1814 */
1815 match = (proto == cmd->arg1);
1816 break;
1817
1818 case O_IP_SRC:
1819 match = (hlen > 0 &&
1820 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1821 src_ip.s_addr);
1822 break;
1823
1824 case O_IP_SRC_MASK:
1825 case O_IP_DST_MASK:
1826 if (hlen > 0) {
1827 uint32_t a =
1828 (cmd->opcode == O_IP_DST_MASK) ?
1829 dst_ip.s_addr : src_ip.s_addr;
1830 uint32_t *p = ((ipfw_insn_u32 *)cmd)->d;
1831 int i = cmdlen-1;
1832
1833 for (; !match && i>0; i-= 2, p+= 2)
1834 match = (p[0] == (a & p[1]));
1835 }
1836 break;
1837
1838 case O_IP_SRC_ME:
1839 if (hlen > 0) {
1840 struct ifnet *tif;
1841
1842 INADDR_TO_IFP(src_ip, tif);
1843 match = (tif != NULL);
1844 }
1845 break;
1846
1847 case O_IP_DST_SET:
1848 case O_IP_SRC_SET:
1849 if (hlen > 0) {
1850 u_int32_t *d = (u_int32_t *)(cmd+1);
1851 u_int32_t addr =
1852 cmd->opcode == O_IP_DST_SET ?
1853 args->f_id.dst_ip :
1854 args->f_id.src_ip;
1855
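/*
 * d[0] is the base address of the set, cmd->arg1 the number of
 * addresses it covers, and d[1..] a bitmap with one bit per address.
 */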
1856 if (addr < d[0])
1857 break;
1858 addr -= d[0]; /* subtract base */
1859 match = (addr < cmd->arg1) &&
1860 ( d[ 1 + (addr>>5)] &
1861 (1<<(addr & 0x1f)) );
1862 }
1863 break;
1864
1865 case O_IP_DST:
1866 match = (hlen > 0 &&
1867 ((ipfw_insn_ip *)cmd)->addr.s_addr ==
1868 dst_ip.s_addr);
1869 break;
1870
1871 case O_IP_DST_ME:
1872 if (hlen > 0) {
1873 struct ifnet *tif;
1874
1875 INADDR_TO_IFP(dst_ip, tif);
1876 match = (tif != NULL);
1877 }
1878 break;
1879
1880 case O_IP_SRCPORT:
1881 case O_IP_DSTPORT:
1882 /*
1883 * offset == 0 && proto != 0 is enough
1884 * to guarantee that we have an IPv4
1885 * packet with port info.
1886 */
1887 if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
1888 && offset == 0) {
1889 u_int16_t x =
1890 (cmd->opcode == O_IP_SRCPORT) ?
1891 src_port : dst_port ;
1892 u_int16_t *p =
1893 ((ipfw_insn_u16 *)cmd)->ports;
1894 int i;
1895
1896 for (i = cmdlen - 1; !match && i>0;
1897 i--, p += 2)
1898 match = (x>=p[0] && x<=p[1]);
1899 }
1900 break;
1901
1902 case O_ICMPTYPE:
1903 match = (offset == 0 && proto==IPPROTO_ICMP &&
1904 icmptype_match(ip, (ipfw_insn_u32 *)cmd) );
1905 break;
1906
1907 case O_IPOPT:
1908 match = (hlen > 0 && ipopts_match(ip, cmd) );
1909 break;
1910
1911 case O_IPVER:
1912 match = (hlen > 0 && cmd->arg1 == ip->ip_v);
1913 break;
1914
1915 case O_IPID:
1916 case O_IPLEN:
1917 case O_IPTTL:
1918 if (hlen > 0) { /* only for IP packets */
1919 uint16_t x;
1920 uint16_t *p;
1921 int i;
1922
1923 if (cmd->opcode == O_IPLEN)
1924 x = ip_len;
1925 else if (cmd->opcode == O_IPTTL)
1926 x = ip->ip_ttl;
1927 else /* must be IPID */
1928 x = ntohs(ip->ip_id);
1929 if (cmdlen == 1) {
1930 match = (cmd->arg1 == x);
1931 break;
1932 }
1933 /* otherwise we have ranges */
1934 p = ((ipfw_insn_u16 *)cmd)->ports;
1935 i = cmdlen - 1;
1936 for (; !match && i>0; i--, p += 2)
1937 match = (x >= p[0] && x <= p[1]);
1938 }
1939 break;
1940
1941 case O_IPPRECEDENCE:
1942 match = (hlen > 0 &&
1943 (cmd->arg1 == (ip->ip_tos & 0xe0)) );
1944 break;
1945
1946 case O_IPTOS:
1947 match = (hlen > 0 &&
1948 flags_match(cmd, ip->ip_tos));
1949 break;
1950
1951 case O_TCPFLAGS:
1952 match = (proto == IPPROTO_TCP && offset == 0 &&
1953 flags_match(cmd,
1954 L3HDR(struct tcphdr,ip)->th_flags));
1955 break;
1956
1957 case O_TCPOPTS:
1958 match = (proto == IPPROTO_TCP && offset == 0 &&
1959 tcpopts_match(ip, cmd));
1960 break;
1961
1962 case O_TCPSEQ:
1963 match = (proto == IPPROTO_TCP && offset == 0 &&
1964 ((ipfw_insn_u32 *)cmd)->d[0] ==
1965 L3HDR(struct tcphdr,ip)->th_seq);
1966 break;
1967
1968 case O_TCPACK:
1969 match = (proto == IPPROTO_TCP && offset == 0 &&
1970 ((ipfw_insn_u32 *)cmd)->d[0] ==
1971 L3HDR(struct tcphdr,ip)->th_ack);
1972 break;
1973
1974 case O_TCPWIN:
1975 match = (proto == IPPROTO_TCP && offset == 0 &&
1976 cmd->arg1 ==
1977 L3HDR(struct tcphdr,ip)->th_win);
1978 break;
1979
1980 case O_ESTAB:
1981 /* reject packets which have SYN only */
1982 /* XXX should i also check for TH_ACK ? */
1983 match = (proto == IPPROTO_TCP && offset == 0 &&
1984 (L3HDR(struct tcphdr,ip)->th_flags &
1985 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
1986 break;
1987
1988 case O_LOG:
1989 if (fw_verbose)
1990 ipfw_log(f, hlen, args->eh, m, oif);
1991 match = 1;
1992 break;
1993
1994 case O_PROB:
1995 match = (random()<((ipfw_insn_u32 *)cmd)->d[0]);
1996 break;
1997
1998 case O_VERREVPATH:
1999 /* Outgoing packets automatically pass/match */
2000 match = ((oif != NULL) ||
2001 (m->m_pkthdr.rcvif == NULL) ||
2002 verify_rev_path(src_ip, m->m_pkthdr.rcvif));
2003 break;
2004
2005 case O_IPSEC:
2006 #ifdef FAST_IPSEC
2007 match = (m_tag_find(m,
2008 PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL);
2009 #endif
2010 #ifdef IPSEC
2011 match = (ipsec_gethist(m, NULL) != NULL);
2012 #endif
2013 /* otherwise no match */
2014 break;
2015
2016 /*
2017 * The second set of opcodes represents 'actions',
2018 * i.e. the terminal part of a rule once the packet
2019 * matches all previous patterns.
2020 * Typically there is only one action for each rule,
2021 * and the opcode is stored at the end of the rule
2022 * (but there are exceptions -- see below).
2023 *
2024 * In general, here we set retval and terminate the
2025 * outer loop (would be a 'break 3' in some language,
2026 * but we need to do a 'goto done').
2027 *
2028 * Exceptions:
2029 * O_COUNT and O_SKIPTO actions:
2030 * instead of terminating, we jump to the next rule
2031 * ('goto next_rule', equivalent to a 'break 2'),
2032 * or to the SKIPTO target ('goto again' after
2033 * having set f, cmd and l), respectively.
2034 *
2035 * O_LIMIT and O_KEEP_STATE: these opcodes are
2036 * not real 'actions', and are stored right
2037 * before the 'action' part of the rule.
2038 * These opcodes try to install an entry in the
2039 * state tables; if successful, we continue with
2040 * the next opcode (match=1; break;), otherwise
2041 * the packet must be dropped
2042 * ('goto done' after setting retval);
2043 *
2044 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2045 * cause a lookup of the state table, and a jump
2046 * to the 'action' part of the parent rule
2047 * ('goto check_body') if an entry is found, or
2048 * (CHECK_STATE only) a jump to the next rule if
2049 * the entry is not found ('goto next_rule').
2050 * The result of the lookup is cached so that
2051 * further instances of these opcodes are
2052 * effectively NOPs.
2053 */
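/*
 * To make the control flow above concrete: a rule body is a
 * sequence of variable-length instructions with the action at the
 * end (ACTION_PTR(f) == f->cmd + f->act_ofs). As an illustrative
 * sketch only (the exact layout is produced by the userland ipfw
 * compiler and may differ), a rule like
 *
 *	allow tcp from any to any 22 keep-state
 *
 * would compile to roughly
 *
 *	[O_PROBE_STATE] [O_PROTO tcp] [O_IP_DSTPORT 22]
 *	[O_KEEP_STATE] [O_ACCEPT]
 *
 * i.e. O_KEEP_STATE sits right before the action and the
 * compiler-inserted O_PROBE_STATE runs first, as described above.
 */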
2054 case O_LIMIT:
2055 case O_KEEP_STATE:
2056 if (install_state(f,
2057 (ipfw_insn_limit *)cmd, args)) {
2058 retval = IP_FW_PORT_DENY_FLAG;
2059 goto done; /* error/limit violation */
2060 }
2061 match = 1;
2062 break;
2063
2064 case O_PROBE_STATE:
2065 case O_CHECK_STATE:
2066 /*
2067 * dynamic rules are checked at the first
2068 * keep-state or check-state occurrence,
2069 * with the result being stored in dyn_dir.
2070 * The compiler introduces a PROBE_STATE
2071 * instruction for us when we have a
2072 * KEEP_STATE (because PROBE_STATE needs
2073 * to be run first).
2074 */
2075 if (dyn_dir == MATCH_UNKNOWN &&
2076 (q = lookup_dyn_rule(&args->f_id,
2077 &dyn_dir, proto == IPPROTO_TCP ?
2078 L3HDR(struct tcphdr, ip) : NULL))
2079 != NULL) {
2080 /*
2081 * Found dynamic entry, update stats
2082 * and jump to the 'action' part of
2083 * the parent rule.
2084 */
2085 q->pcnt++;
2086 q->bcnt += pktlen;
2087 f = q->rule;
2088 cmd = ACTION_PTR(f);
2089 l = f->cmd_len - f->act_ofs;
2090 goto check_body;
2091 }
2092 /*
2093 * Dynamic entry not found. If CHECK_STATE,
2094 * skip to the next rule; if PROBE_STATE, just
2095 * ignore it and continue with the next opcode.
2096 */
2097 if (cmd->opcode == O_CHECK_STATE)
2098 goto next_rule;
2099 match = 1;
2100 break;
2101
2102 case O_ACCEPT:
2103 retval = 0; /* accept */
2104 goto done;
2105
2106 case O_PIPE:
2107 case O_QUEUE:
2108 args->rule = f; /* report matching rule */
2109 retval = cmd->arg1 | IP_FW_PORT_DYNT_FLAG;
2110 goto done;
2111
2112 case O_DIVERT:
2113 case O_TEE:
2114 if (args->eh) /* not on layer 2 */
2115 break;
2116 args->divert_rule = f->rulenum;
2117 retval = (cmd->opcode == O_DIVERT) ?
2118 cmd->arg1 :
2119 cmd->arg1 | IP_FW_PORT_TEE_FLAG;
2120 goto done;
2121
2122 case O_COUNT:
2123 case O_SKIPTO:
2124 f->pcnt++; /* update stats */
2125 f->bcnt += pktlen;
2126 f->timestamp = timenow.tv_sec;
2127 if (cmd->opcode == O_COUNT)
2128 goto next_rule;
2129 /* handle skipto */
2130 if (f->next_rule == NULL)
2131 lookup_next_rule(f);
2132 f = f->next_rule;
2133 goto again;
2134
2135 case O_REJECT:
2136 /*
2137 * Drop the packet and send a reject notice
2138 * if the packet is not ICMP (or is an ICMP
2139 * query), and it is not multicast/broadcast.
2140 */
2141 if (hlen > 0 && offset == 0 &&
2142 (proto != IPPROTO_ICMP ||
2143 is_icmp_query(ip)) &&
2144 !(m->m_flags & (M_BCAST|M_MCAST)) &&
2145 !IN_MULTICAST(dst_ip.s_addr)) {
2146 send_reject(args, cmd->arg1,
2147 offset,ip_len);
2148 m = args->m;
2149 }
2150 /* FALLTHROUGH */
2151 case O_DENY:
2152 retval = IP_FW_PORT_DENY_FLAG;
2153 goto done;
2154
2155 case O_FORWARD_IP:
2156 if (args->eh) /* not valid on layer2 pkts */
2157 break;
2158 if (!q || dyn_dir == MATCH_FORWARD)
2159 args->next_hop =
2160 &((ipfw_insn_sa *)cmd)->sa;
2161 retval = 0;
2162 goto done;
2163
2164 default:
2165 panic("-- unknown opcode %d\n", cmd->opcode);
2166 } /* end of switch() on opcodes */
2167
2168 if (cmd->len & F_NOT)
2169 match = !match;
2170
2171 if (match) {
2172 if (cmd->len & F_OR)
2173 skip_or = 1;
2174 } else {
2175 if (!(cmd->len & F_OR)) /* not an OR block, */
2176 break; /* try next rule */
2177 }
2178
2179 } /* end of inner for, scan opcodes */
2180
2181 next_rule:; /* try next rule */
2182
2183 } /* end of outer for, scan rules */
2184 printf("ipfw: ouch!, skip past end of rules, denying packet\n");
2185 lck_mtx_unlock(ipfw_mutex);
2186 return(IP_FW_PORT_DENY_FLAG);
2187
2188 done:
2189 /* Update statistics */
2190 f->pcnt++;
2191 f->bcnt += pktlen;
2192 f->timestamp = timenow.tv_sec;
2193 lck_mtx_unlock(ipfw_mutex);
2194 return retval;
2195
2196 pullup_failed:
2197 if (fw_verbose)
2198 printf("ipfw: pullup failed\n");
2199 lck_mtx_unlock(ipfw_mutex);
2200 return(IP_FW_PORT_DENY_FLAG);
2201 }
2202
2203 /*
2204 * When a rule is added/deleted, clear the next_rule pointers in all rules.
2205 * These will be reconstructed on the fly as packets are matched.
2206 * Must be called at splimp().
2207 */
2208 static void
2209 flush_rule_ptrs(void)
2210 {
2211 struct ip_fw *rule;
2212
2213 for (rule = layer3_chain; rule; rule = rule->next)
2214 rule->next_rule = NULL;
2215 }
2216
2217 /*
2218 * When pipes/queues are deleted, clear the "pipe_ptr" pointer to a given
2219 * pipe/queue, or to all of them (match == NULL).
2220 * Must be called at splimp().
2221 */
2222 void
2223 flush_pipe_ptrs(struct dn_flow_set *match)
2224 {
2225 struct ip_fw *rule;
2226
2227 for (rule = layer3_chain; rule; rule = rule->next) {
2228 ipfw_insn_pipe *cmd = (ipfw_insn_pipe *)ACTION_PTR(rule);
2229
2230 if (cmd->o.opcode != O_PIPE && cmd->o.opcode != O_QUEUE)
2231 continue;
2232 /*
2233 * XXX Use bcmp/bzero to handle pipe_ptr to overcome
2234 * possible alignment problems on 64-bit architectures.
2235 * This code is seldom used so we do not worry too
2236 * much about efficiency.
2237 */
2238 if (match == NULL ||
2239 !bcmp(&cmd->pipe_ptr, &match, sizeof(match)) )
2240 bzero(&cmd->pipe_ptr, sizeof(cmd->pipe_ptr));
2241 }
2242 }
2243
2244 /*
2245 * Add a new rule to the list. Copy the rule into a malloc'ed area, then
2246 * possibly create a rule number and add the rule to the list.
2247 * Update the rule_number in the input struct so the caller knows it as well.
2248 */
2249 static int
2250 add_rule(struct ip_fw **head, struct ip_fw *input_rule)
2251 {
2252 struct ip_fw *rule, *f, *prev;
2253 int l = RULESIZE(input_rule);
2254
2255 if (*head == NULL && input_rule->rulenum != IPFW_DEFAULT_RULE)
2256 return (EINVAL);
2257
2258 rule = _MALLOC(l, M_IPFW, M_WAIT);
2259 if (rule == NULL) {
2260 printf("ipfw2: add_rule MALLOC failed\n");
2261 return (ENOSPC);
2262 }
2263
2264 bzero(rule, l);
2265 bcopy(input_rule, rule, l);
2266
2267 rule->next = NULL;
2268 rule->next_rule = NULL;
2269
2270 rule->pcnt = 0;
2271 rule->bcnt = 0;
2272 rule->timestamp = 0;
2273
2274 if (*head == NULL) { /* default rule */
2275 *head = rule;
2276 goto done;
2277 }
2278
2279 /*
2280 * If rulenum is 0, find highest numbered rule before the
2281 * default rule, and add autoinc_step
2282 */
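/*
 * Example (illustrative values): with autoinc_step = 100 and rules
 * 100 and 200 already installed, a rule submitted with rulenum == 0
 * is installed as rule 300; if adding autoinc_step would reach
 * IPFW_DEFAULT_RULE, the rule simply keeps the highest existing
 * number.
 */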
2283 if (autoinc_step < 1)
2284 autoinc_step = 1;
2285 else if (autoinc_step > 1000)
2286 autoinc_step = 1000;
2287 if (rule->rulenum == 0) {
2288 /*
2289 * locate the highest numbered rule before default
2290 */
2291 for (f = *head; f; f = f->next) {
2292 if (f->rulenum == IPFW_DEFAULT_RULE)
2293 break;
2294 rule->rulenum = f->rulenum;
2295 }
2296 if (rule->rulenum < IPFW_DEFAULT_RULE - autoinc_step)
2297 rule->rulenum += autoinc_step;
2298 input_rule->rulenum = rule->rulenum;
2299 }
2300
2301 /*
2302 * Now insert the new rule in the right place in the sorted list.
2303 */
2304 for (prev = NULL, f = *head; f; prev = f, f = f->next) {
2305 if (f->rulenum > rule->rulenum) { /* found the location */
2306 if (prev) {
2307 rule->next = f;
2308 prev->next = rule;
2309 } else { /* head insert */
2310 rule->next = *head;
2311 *head = rule;
2312 }
2313 break;
2314 }
2315 }
2316 flush_rule_ptrs();
2317 done:
2318 static_count++;
2319 static_len += l;
2320 DEB(printf("ipfw: installed rule %d, static count now %d\n",
2321 rule->rulenum, static_count);)
2322 return (0);
2323 }
2324
2325 /**
2326 * Free storage associated with a static rule (including derived
2327 * dynamic rules).
2328 * The caller is in charge of clearing rule pointers to avoid
2329 * dangling pointers.
2330 * @return a pointer to the next entry.
2331 * Arguments are not checked, so they better be correct.
2332 * Must be called at splimp().
2333 */
2334 static struct ip_fw *
2335 delete_rule(struct ip_fw **head, struct ip_fw *prev, struct ip_fw *rule)
2336 {
2337 struct ip_fw *n;
2338 int l = RULESIZE(rule);
2339
2340 n = rule->next;
2341 remove_dyn_rule(rule, NULL /* force removal */);
2342 if (prev == NULL)
2343 *head = n;
2344 else
2345 prev->next = n;
2346 static_count--;
2347 static_len -= l;
2348
2349 #if DUMMYNET
2350 if (DUMMYNET_LOADED)
2351 ip_dn_ruledel_ptr(rule);
2352 #endif /* DUMMYNET */
2353 _FREE(rule, M_IPFW);
2354 return n;
2355 }
2356
2357 #if DEBUG_INACTIVE_RULES
2358 static void
2359 print_chain(struct ip_fw **chain)
2360 {
2361 struct ip_fw *rule = *chain;
2362
2363 for (; rule; rule = rule->next) {
2364 ipfw_insn *cmd = ACTION_PTR(rule);
2365
2366 printf("ipfw: rule->rulenum = %d\n", rule->rulenum);
2367
2368 if (rule->reserved_1 == IPFW_RULE_INACTIVE) {
2369 printf("ipfw: rule->reserved = IPFW_RULE_INACTIVE\n");
2370 }
2371
2372 switch (cmd->opcode) {
2373 case O_DENY:
2374 printf("ipfw: ACTION: Deny\n");
2375 break;
2376
2377 case O_REJECT:
2378 if (cmd->arg1==ICMP_REJECT_RST)
2379 printf("ipfw: ACTION: Reset\n");
2380 else if (cmd->arg1==ICMP_UNREACH_HOST)
2381 printf("ipfw: ACTION: Reject\n");
2382 break;
2383
2384 case O_ACCEPT:
2385 printf("ipfw: ACTION: Accept\n");
2386 break;
2387 case O_COUNT:
2388 printf("ipfw: ACTION: Count\n");
2389 break;
2390 case O_DIVERT:
2391 printf("ipfw: ACTION: Divert\n");
2392 break;
2393 case O_TEE:
2394 printf("ipfw: ACTION: Tee\n");
2395 break;
2396 case O_SKIPTO:
2397 printf("ipfw: ACTION: SkipTo\n");
2398 break;
2399 case O_PIPE:
2400 printf("ipfw: ACTION: Pipe\n");
2401 break;
2402 case O_QUEUE:
2403 printf("ipfw: ACTION: Queue\n");
2404 break;
2405 case O_FORWARD_IP:
2406 printf("ipfw: ACTION: Forward\n");
2407 break;
2408 default:
2409 printf("ipfw: invalid action! %d\n", cmd->opcode);
2410 }
2411 }
2412 }
2413 #endif /* DEBUG_INACTIVE_RULES */
2414
2415 static void
2416 flush_inactive(void *param)
2417 {
2418 struct ip_fw *inactive_rule = (struct ip_fw *)param;
2419 struct ip_fw *rule, *prev;
2420
2421 lck_mtx_lock(ipfw_mutex);
2422
2423 for (rule = layer3_chain, prev = NULL; rule; ) {
2424 if (rule == inactive_rule && rule->reserved_1 == IPFW_RULE_INACTIVE) {
2425 struct ip_fw *n = rule;
2426
2427 if (prev == NULL) {
2428 layer3_chain = rule->next;
2429 }
2430 else {
2431 prev->next = rule->next;
2432 }
2433 rule = rule->next;
2434 _FREE(n, M_IPFW);
2435 }
2436 else {
2437 prev = rule;
2438 rule = rule->next;
2439 }
2440 }
2441
2442 #if DEBUG_INACTIVE_RULES
2443 print_chain(&layer3_chain);
2444 #endif
2445 lck_mtx_unlock(ipfw_mutex);
2446 }
2447
2448 static void
2449 mark_inactive(struct ip_fw **prev, struct ip_fw **rule)
2450 {
2451 int l = RULESIZE(*rule);
2452
2453 if ((*rule)->reserved_1 != IPFW_RULE_INACTIVE) {
2454 (*rule)->reserved_1 = IPFW_RULE_INACTIVE;
2455 static_count--;
2456 static_len -= l;
2457
2458 timeout(flush_inactive, *rule, 30*hz); /* 30 sec. */
2459 }
2460
2461 *prev = *rule;
2462 *rule = (*rule)->next;
2463 }
2464
2465 /*
2466 * Deletes all rules from a chain (except rules in set RESVD_SET
2467 * unless kill_default = 1).
2468 * Must be called at splimp().
2469 */
2470 static void
2471 free_chain(struct ip_fw **chain, int kill_default)
2472 {
2473 struct ip_fw *prev, *rule;
2474
2475 flush_rule_ptrs(); /* more efficient to do outside the loop */
2476 for (prev = NULL, rule = *chain; rule ; )
2477 if (kill_default || rule->set != RESVD_SET) {
2478 ipfw_insn *cmd = ACTION_PTR(rule);
2479
2480 /* skip over forwarding rules so struct isn't
2481 * deleted while pointer is still in use elsewhere
2482 */
2483 if (cmd->opcode == O_FORWARD_IP) {
2484 mark_inactive(&prev, &rule);
2485 }
2486 else {
2487 rule = delete_rule(chain, prev, rule);
2488 }
2489 }
2490 else {
2491 prev = rule;
2492 rule = rule->next;
2493 }
2494 }
2495
2496 /**
2497 * Remove all rules with given number, and also do set manipulation.
2498 * Assumes chain != NULL && *chain != NULL.
2499 *
2500 * The argument is a u_int32_t. The low 16 bits are the rule or set number,
2501 * the next 8 bits are the new set, the top 8 bits are the command:
2502 *
2503 * 0 delete rules with given number
2504 * 1 delete rules with given set number
2505 * 2 move rules with given number to new set
2506 * 3 move rules with given set number to new set
2507 * 4 swap sets with given numbers
2508 */
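/*
 * For illustration (hypothetical caller, values chosen arbitrarily),
 * "move rule 100 into set 5" would be encoded as
 *
 *	u_int32_t arg = (2 << 24) | (5 << 16) | 100;
 *	(void) del_entry(&layer3_chain, arg);
 *
 * and "delete all rules in set 5" as arg = (1 << 24) | 5.
 */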
2509 static int
2510 del_entry(struct ip_fw **chain, u_int32_t arg)
2511 {
2512 struct ip_fw *prev = NULL, *rule = *chain;
2513 u_int16_t rulenum; /* rule or old_set */
2514 u_int8_t cmd, new_set;
2515
2516 rulenum = arg & 0xffff;
2517 cmd = (arg >> 24) & 0xff;
2518 new_set = (arg >> 16) & 0xff;
2519
2520 if (cmd > 4)
2521 return EINVAL;
2522 if (new_set > RESVD_SET)
2523 return EINVAL;
2524 if (cmd == 0 || cmd == 2) {
2525 if (rulenum >= IPFW_DEFAULT_RULE)
2526 return EINVAL;
2527 } else {
2528 if (rulenum > RESVD_SET) /* old_set */
2529 return EINVAL;
2530 }
2531
2532 switch (cmd) {
2533 case 0: /* delete rules with given number */
2534 /*
2535 * locate first rule to delete
2536 */
2537 for (; rule->rulenum < rulenum; prev = rule, rule = rule->next)
2538 ;
2539 if (rule->rulenum != rulenum)
2540 return EINVAL;
2541
2542 /*
2543 * flush pointers outside the loop, then delete all matching
2544 * rules. prev remains the same throughout the cycle.
2545 */
2546 flush_rule_ptrs();
2547 while (rule->rulenum == rulenum) {
2548 ipfw_insn *insn = ACTION_PTR(rule);
2549
2550 /* keep forwarding rules around so struct isn't
2551 * deleted while pointer is still in use elsewhere
2552 */
2553 if (insn->opcode == O_FORWARD_IP) {
2554 mark_inactive(&prev, &rule);
2555 }
2556 else {
2557 rule = delete_rule(chain, prev, rule);
2558 }
2559 }
2560 break;
2561
2562 case 1: /* delete all rules with given set number */
2563 flush_rule_ptrs();
2564 while (rule->rulenum < IPFW_DEFAULT_RULE) {
2565 if (rule->set == rulenum) {
2566 ipfw_insn *insn = ACTION_PTR(rule);
2567
2568 /* keep forwarding rules around so struct isn't
2569 * deleted while pointer is still in use elsewhere
2570 */
2571 if (insn->opcode == O_FORWARD_IP) {
2572 mark_inactive(&prev, &rule);
2573 }
2574 else {
2575 rule = delete_rule(chain, prev, rule);
2576 }
2577 }
2578 else {
2579 prev = rule;
2580 rule = rule->next;
2581 }
2582 }
2583 break;
2584
2585 case 2: /* move rules with given number to new set */
2586 for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next)
2587 if (rule->rulenum == rulenum)
2588 rule->set = new_set;
2589 break;
2590
2591 case 3: /* move rules with given set number to new set */
2592 for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next)
2593 if (rule->set == rulenum)
2594 rule->set = new_set;
2595 break;
2596
2597 case 4: /* swap two sets */
2598 for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next)
2599 if (rule->set == rulenum)
2600 rule->set = new_set;
2601 else if (rule->set == new_set)
2602 rule->set = rulenum;
2603 break;
2604 }
2605 return 0;
2606 }
2607
2608 /*
2609 * Clear counters for a specific rule.
2610 */
2611 static void
2612 clear_counters(struct ip_fw *rule, int log_only)
2613 {
2614 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
2615
2616 if (log_only == 0) {
2617 rule->bcnt = rule->pcnt = 0;
2618 rule->timestamp = 0;
2619 }
2620 if (l->o.opcode == O_LOG)
2621 l->log_left = l->max_log;
2622 }
2623
2624 /**
2625 * Reset some or all counters on firewall rules.
2626 * @arg rulenum is 0 to clear all entries, or contains a specific
2627 * rule number.
2628 * @arg log_only is 1 if we only want to reset logs, zero otherwise.
2629 */
2630 static int
2631 zero_entry(int rulenum, int log_only)
2632 {
2633 struct ip_fw *rule;
2634 const char *msg;
2635
2636 if (rulenum == 0) {
2637 norule_counter = 0;
2638 for (rule = layer3_chain; rule; rule = rule->next)
2639 clear_counters(rule, log_only);
2640 msg = log_only ? "ipfw: All logging counts reset.\n" :
2641 "ipfw: Accounting cleared.\n";
2642 } else {
2643 int cleared = 0;
2644 /*
2645 * We can have multiple rules with the same number, so we
2646 * need to clear them all.
2647 */
2648 for (rule = layer3_chain; rule; rule = rule->next)
2649 if (rule->rulenum == rulenum) {
2650 while (rule && rule->rulenum == rulenum) {
2651 clear_counters(rule, log_only);
2652 rule = rule->next;
2653 }
2654 cleared = 1;
2655 break;
2656 }
2657 if (!cleared) /* we did not find any matching rules */
2658 return (EINVAL);
2659 msg = log_only ? "ipfw: Entry %d logging count reset.\n" :
2660 "ipfw: Entry %d cleared.\n";
2661 }
2662 if (fw_verbose)
2663 {
2664 dolog((LOG_AUTHPRIV | LOG_NOTICE, msg, rulenum));
2665 }
2666 return (0);
2667 }
2668
2669 /*
2670 * Check validity of the structure before insert.
2671 * Fortunately rules are simple, so this mostly needs to check rule sizes.
2672 */
2673 static int
2674 check_ipfw_struct(struct ip_fw *rule, int size)
2675 {
2676 int l, cmdlen = 0;
2677 int have_action=0;
2678 ipfw_insn *cmd;
2679
2680 if (size < sizeof(*rule)) {
2681 printf("ipfw: rule too short\n");
2682 return (EINVAL);
2683 }
2684 /* first, check for valid size */
2685 l = RULESIZE(rule);
2686 if (l != size) {
2687 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
2688 return (EINVAL);
2689 }
2690 /*
2691 * Now go for the individual checks. Very simple ones, basically only
2692 * instruction sizes.
2693 */
2694 for (l = rule->cmd_len, cmd = rule->cmd ;
2695 l > 0 ; l -= cmdlen, cmd += cmdlen) {
2696 cmdlen = F_LEN(cmd);
2697 if (cmdlen > l) {
2698 printf("ipfw: opcode %d size truncated\n",
2699 cmd->opcode);
2700 return EINVAL;
2701 }
2702 DEB(printf("ipfw: opcode %d\n", cmd->opcode);)
2703 switch (cmd->opcode) {
2704 case O_PROBE_STATE:
2705 case O_KEEP_STATE:
2706 case O_PROTO:
2707 case O_IP_SRC_ME:
2708 case O_IP_DST_ME:
2709 case O_LAYER2:
2710 case O_IN:
2711 case O_FRAG:
2712 case O_IPOPT:
2713 case O_IPTOS:
2714 case O_IPPRECEDENCE:
2715 case O_IPVER:
2716 case O_TCPWIN:
2717 case O_TCPFLAGS:
2718 case O_TCPOPTS:
2719 case O_ESTAB:
2720 case O_VERREVPATH:
2721 case O_IPSEC:
2722 if (cmdlen != F_INSN_SIZE(ipfw_insn))
2723 goto bad_size;
2724 break;
2725 case O_UID:
2726 #ifndef __APPLE__
2727 case O_GID:
2728 #endif /* __APPLE__ */
2729 case O_IP_SRC:
2730 case O_IP_DST:
2731 case O_TCPSEQ:
2732 case O_TCPACK:
2733 case O_PROB:
2734 case O_ICMPTYPE:
2735 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
2736 goto bad_size;
2737 break;
2738
2739 case O_LIMIT:
2740 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
2741 goto bad_size;
2742 break;
2743
2744 case O_LOG:
2745 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
2746 goto bad_size;
2747
2748 /* enforce logging limit */
2749 if (fw_verbose &&
2750 ((ipfw_insn_log *)cmd)->max_log == 0 && verbose_limit != 0) {
2751 ((ipfw_insn_log *)cmd)->max_log = verbose_limit;
2752 }
2753
2754 ((ipfw_insn_log *)cmd)->log_left =
2755 ((ipfw_insn_log *)cmd)->max_log;
2756
2757 break;
2758
2759 case O_IP_SRC_MASK:
2760 case O_IP_DST_MASK:
2761 /* only odd command lengths */
2762 if ( !(cmdlen & 1) || cmdlen > 31)
2763 goto bad_size;
2764 break;
2765
2766 case O_IP_SRC_SET:
2767 case O_IP_DST_SET:
2768 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
2769 printf("ipfw: invalid set size %d\n",
2770 cmd->arg1);
2771 return EINVAL;
2772 }
2773 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
2774 (cmd->arg1+31)/32 )
2775 goto bad_size;
2776 break;
2777
2778 case O_MACADDR2:
2779 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
2780 goto bad_size;
2781 break;
2782
2783 case O_NOP:
2784 case O_IPID:
2785 case O_IPTTL:
2786 case O_IPLEN:
2787 if (cmdlen < 1 || cmdlen > 31)
2788 goto bad_size;
2789 break;
2790
2791 case O_MAC_TYPE:
2792 case O_IP_SRCPORT:
2793 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
2794 if (cmdlen < 2 || cmdlen > 31)
2795 goto bad_size;
2796 break;
2797
2798 case O_RECV:
2799 case O_XMIT:
2800 case O_VIA:
2801 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
2802 goto bad_size;
2803 break;
2804
2805 case O_PIPE:
2806 case O_QUEUE:
2807 if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe))
2808 goto bad_size;
2809 goto check_action;
2810
2811 case O_FORWARD_IP:
2812 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
2813 goto bad_size;
2814 goto check_action;
2815
2816 case O_FORWARD_MAC: /* XXX not implemented yet */
2817 case O_CHECK_STATE:
2818 case O_COUNT:
2819 case O_ACCEPT:
2820 case O_DENY:
2821 case O_REJECT:
2822 case O_SKIPTO:
2823 case O_DIVERT:
2824 case O_TEE:
2825 if (cmdlen != F_INSN_SIZE(ipfw_insn))
2826 goto bad_size;
2827 check_action:
2828 if (have_action) {
2829 printf("ipfw: opcode %d, multiple actions"
2830 " not allowed\n",
2831 cmd->opcode);
2832 return EINVAL;
2833 }
2834 have_action = 1;
2835 if (l != cmdlen) {
2836 printf("ipfw: opcode %d, action must be"
2837 " last opcode\n",
2838 cmd->opcode);
2839 return EINVAL;
2840 }
2841 break;
2842 default:
2843 printf("ipfw: opcode %d, unknown opcode\n",
2844 cmd->opcode);
2845 return EINVAL;
2846 }
2847 }
2848 if (have_action == 0) {
2849 printf("ipfw: missing action\n");
2850 return EINVAL;
2851 }
2852 return 0;
2853
2854 bad_size:
2855 printf("ipfw: opcode %d size %d wrong\n",
2856 cmd->opcode, cmdlen);
2857 return EINVAL;
2858 }
2859
2860
2861 static void
2862 ipfw_kev_post_msg(u_int32_t event_code)
2863 {
2864 struct kev_msg ev_msg;
2865
2866 bzero(&ev_msg, sizeof(struct kev_msg));
2867
2868 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2869 ev_msg.kev_class = KEV_FIREWALL_CLASS;
2870 ev_msg.kev_subclass = KEV_IPFW_SUBCLASS;
2871 ev_msg.event_code = event_code;
2872
2873 kev_post_msg(&ev_msg);
2874
2875 }
2876
2877 /**
2878 * {set|get}sockopt parser.
2879 */
2880 static int
2881 ipfw_ctl(struct sockopt *sopt)
2882 {
2883 #define RULE_MAXSIZE (256*sizeof(u_int32_t))
2884 u_int32_t api_version;
2885 int command;
2886 int error;
2887 size_t size;
2888 struct ip_fw *bp , *buf, *rule;
2889
2890 /* copy of orig sopt to send to ipfw_get_command_and_version() */
2891 struct sockopt tmp_sopt = *sopt;
2892 struct timeval timenow;
2893
2894 getmicrotime(&timenow);
2895
2896 /*
2897 * Disallow modifications in really-really secure mode, but still allow
2898 * the logging counters to be reset.
2899 */
2900 if (sopt->sopt_name == IP_FW_ADD ||
2901 (sopt->sopt_dir == SOPT_SET && sopt->sopt_name != IP_FW_RESETLOG)) {
2902 #if __FreeBSD_version >= 500034
2903 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
2904 if (error)
2905 return (error);
2906 #else /* FreeBSD 4.x */
2907 if (securelevel >= 3)
2908 return (EPERM);
2909 #endif
2910 }
2911
2912 /* first get the command and version, then do conversion as necessary */
2913 error = ipfw_get_command_and_version(&tmp_sopt, &command, &api_version);
2914
2915 if (error) {
2916 /* error getting the version */
2917 return error;
2918 }
2919
2920 switch (command) {
2921 case IP_FW_GET:
2922 /*
2923 * pass up a copy of the current rules. Static rules
2924 * come first (the last of which has number IPFW_DEFAULT_RULE),
2925 * followed by a possibly empty list of dynamic rules.
2926 * The last dynamic rule has NULL in the "next" field.
2927 */
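/*
 * A minimal userland sketch of consuming this layout (illustrative
 * only; error handling omitted, buf/BUFSIZE are placeholders, and
 * RULESIZE is used as defined for this file):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	socklen_t len = BUFSIZE;
 *	struct ip_fw *r = (struct ip_fw *)buf;
 *
 *	getsockopt(s, IPPROTO_IP, IP_FW_GET, buf, &len);
 *	for (;;) {
 *		... consume static rule r ...
 *		if (r->rulenum == IPFW_DEFAULT_RULE)
 *			break;
 *		r = (struct ip_fw *)((char *)r + RULESIZE(r));
 *	}
 *	... ipfw_dyn_rule entries, if any, follow the default rule;
 *	    the last one has a NULL "next" field ...
 */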
2928 lck_mtx_lock(ipfw_mutex);
2929 size = static_len; /* size of static rules */
2930 if (ipfw_dyn_v) /* add size of dyn.rules */
2931 size += (dyn_count * sizeof(ipfw_dyn_rule));
2932
2933 /*
2934 * XXX todo: if the user passes a short length just to know
2935 * how much room is needed, do not bother filling up the
2936 * buffer, just jump to the sooptcopyout.
2937 */
2938 buf = _MALLOC(size, M_TEMP, M_WAITOK);
2939 if (buf == 0) {
2940 lck_mtx_unlock(ipfw_mutex);
2941 error = ENOBUFS;
2942 break;
2943 }
2944
2945 bzero(buf, size);
2946
2947 bp = buf;
2948 for (rule = layer3_chain; rule ; rule = rule->next) {
2949 int i = RULESIZE(rule);
2950
2951 if (rule->reserved_1 == IPFW_RULE_INACTIVE) {
2952 continue;
2953 }
2954 bcopy(rule, bp, i);
2955 bcopy(&set_disable, &(bp->next_rule),
2956 sizeof(set_disable));
2957 bp = (struct ip_fw *)((char *)bp + i);
2958 }
2959 if (ipfw_dyn_v) {
2960 int i;
2961 ipfw_dyn_rule *p, *dst, *last = NULL;
2962
2963 dst = (ipfw_dyn_rule *)bp;
2964 for (i = 0 ; i < curr_dyn_buckets ; i++ )
2965 for ( p = ipfw_dyn_v[i] ; p != NULL ;
2966 p = p->next, dst++ ) {
2967 bcopy(p, dst, sizeof *p);
2968 bcopy(&(p->rule->rulenum), &(dst->rule),
2969 sizeof(p->rule->rulenum));
2970 /*
2971 * store a non-null value in "next".
2972 * The userland code will interpret a
2973 * NULL here as a marker
2974 * for the last dynamic rule.
2975 */
2976 bcopy(&dst, &dst->next, sizeof(dst));
2977 last = dst ;
2978 dst->expire =
2979 TIME_LEQ(dst->expire, timenow.tv_sec) ?
2980 0 : dst->expire - timenow.tv_sec ;
2981 }
2982 if (last != NULL) /* mark last dynamic rule */
2983 bzero(&last->next, sizeof(last));
2984 }
2985 lck_mtx_unlock(ipfw_mutex);
2986
2987 /* convert back if necessary and copyout */
2988 if (api_version == IP_FW_VERSION_0) {
2989 int i, len = 0;
2990 struct ip_old_fw *buf2, *rule_vers0;
2991
2992 lck_mtx_lock(ipfw_mutex);
2993 buf2 = _MALLOC(static_count * sizeof(struct ip_old_fw), M_TEMP, M_WAITOK);
2994 if (buf2 == 0) {
2995 lck_mtx_unlock(ipfw_mutex);
2996 error = ENOBUFS;
2997 }
2998
2999 if (!error) {
3000 bp = buf;
3001 rule_vers0 = buf2;
3002
3003 for (i = 0; i < static_count; i++) {
3004 /* static rules have different sizes */
3005 int j = RULESIZE(bp);
3006 ipfw_convert_from_latest(bp, rule_vers0, api_version);
3007 bp = (struct ip_fw *)((char *)bp + j);
3008 len += sizeof(*rule_vers0);
3009 rule_vers0++;
3010 }
3011 lck_mtx_unlock(ipfw_mutex);
3012 error = sooptcopyout(sopt, buf2, len);
3013 _FREE(buf2, M_TEMP);
3014 }
3015 } else if (api_version == IP_FW_VERSION_1) {
3016 int i, len = 0, buf_size;
3017 struct ip_fw_compat *buf2, *rule_vers1;
3018 struct ipfw_dyn_rule_compat *dyn_rule_vers1, *dyn_last = NULL;
3019 ipfw_dyn_rule *p;
3020
3021 lck_mtx_lock(ipfw_mutex);
3022 buf_size = static_count * sizeof(struct ip_fw_compat) +
3023 dyn_count * sizeof(struct ipfw_dyn_rule_compat);
3024
3025 buf2 = _MALLOC(buf_size, M_TEMP, M_WAITOK);
3026 if (buf2 == 0) {
3027 lck_mtx_unlock(ipfw_mutex);
3028 error = ENOBUFS;
3029 }
3030
3031 if (!error) {
3032 bp = buf;
3033 rule_vers1 = buf2;
3034
3035 /* first do static rules */
3036 for (i = 0; i < static_count; i++) {
3037 /* static rules have different sizes */
3038 int j = RULESIZE(bp);
3039 ipfw_convert_from_latest(bp, rule_vers1, api_version);
3040 bp = (struct ip_fw *)((char *)bp + j);
3041 len += sizeof(*rule_vers1);
3042 rule_vers1++;
3043 }
3044
3045 /* now do dynamic rules */
3046 dyn_rule_vers1 = (struct ipfw_dyn_rule_compat *)rule_vers1;
3047 if (ipfw_dyn_v) {
3048 for (i = 0; i < curr_dyn_buckets; i++) {
3049 for ( p = ipfw_dyn_v[i] ; p != NULL ; p = p->next) {
3050 dyn_rule_vers1->chain = p->rule->rulenum;
3051 dyn_rule_vers1->id = p->id;
3052 dyn_rule_vers1->mask = p->id;
3053 dyn_rule_vers1->type = p->dyn_type;
3054 dyn_rule_vers1->expire = p->expire;
3055 dyn_rule_vers1->pcnt = p->pcnt;
3056 dyn_rule_vers1->bcnt = p->bcnt;
3057 dyn_rule_vers1->bucket = p->bucket;
3058 dyn_rule_vers1->state = p->state;
3059
3060 dyn_rule_vers1->next = (struct ipfw_dyn_rule *) dyn_rule_vers1;
3061 dyn_last = dyn_rule_vers1;
3062
3063 len += sizeof(*dyn_rule_vers1);
3064 dyn_rule_vers1++;
3065 }
3066 }
3067
3068 if (dyn_last != NULL) {
3069 dyn_last->next = NULL;
3070 }
3071 }
3072 lck_mtx_unlock(ipfw_mutex);
3073
3074 error = sooptcopyout(sopt, buf2, len);
3075 _FREE(buf2, M_TEMP);
3076 }
3077 } else {
3078 error = sooptcopyout(sopt, buf, size);
3079 }
3080
3081 _FREE(buf, M_TEMP);
3082 break;
3083
3084 case IP_FW_FLUSH:
3085 /*
3086 * Normally we cannot release the lock on each iteration.
3087 * We could do it here only because we start from the head all
3088 * the time, so there is no risk of missing some entries.
3089 * On the other hand, the risk is that we end up with
3090 * a very inconsistent ruleset, so better keep the lock
3091 * around the whole cycle.
3092 *
3093 * XXX this code can be improved by resetting the head of
3094 * the list to point to the default rule, and then freeing
3095 * the old list without the need for a lock.
3096 */
3097
3098 lck_mtx_lock(ipfw_mutex);
3099 free_chain(&layer3_chain, 0 /* keep default rule */);
3100 fw_bypass = 1;
3101 #if DEBUG_INACTIVE_RULES
3102 print_chain(&layer3_chain);
3103 #endif
3104 lck_mtx_unlock(ipfw_mutex);
3105 break;
3106
3107 case IP_FW_ADD:
3108 rule = _MALLOC(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3109 if (rule == 0) {
3110 error = ENOBUFS;
3111 break;
3112 }
3113
3114 bzero(rule, RULE_MAXSIZE);
3115
3116 if (api_version != IP_FW_CURRENT_API_VERSION) {
3117 error = ipfw_convert_to_latest(sopt, rule, api_version);
3118 }
3119 else {
3120 error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3121 sizeof(struct ip_fw) );
3122 }
3123
3124 if (!error) {
3125 if ((api_version == IP_FW_VERSION_0) || (api_version == IP_FW_VERSION_1)) {
3126 /* the rule has already been checked so just
3127 * adjust sopt_valsize to match what would be expected.
3128 */
3129 sopt->sopt_valsize = RULESIZE(rule);
3130 }
3131 error = check_ipfw_struct(rule, sopt->sopt_valsize);
3132 if (!error) {
3133 lck_mtx_lock(ipfw_mutex);
3134 error = add_rule(&layer3_chain, rule);
3135 if (!error && fw_bypass)
3136 fw_bypass = 0;
3137 lck_mtx_unlock(ipfw_mutex);
3138
3139 size = RULESIZE(rule);
3140 if (!error && sopt->sopt_dir == SOPT_GET) {
3141 /* convert back if necessary and copyout */
3142 if (api_version == IP_FW_VERSION_0) {
3143 struct ip_old_fw rule_vers0;
3144
3145 ipfw_convert_from_latest(rule, &rule_vers0, api_version);
3146 sopt->sopt_valsize = sizeof(struct ip_old_fw);
3147
3148 error = sooptcopyout(sopt, &rule_vers0, sizeof(struct ip_old_fw));
3149 } else if (api_version == IP_FW_VERSION_1) {
3150 struct ip_fw_compat rule_vers1;
3151
3152 ipfw_convert_from_latest(rule, &rule_vers1, api_version);
3153 sopt->sopt_valsize = sizeof(struct ip_fw_compat);
3154
3155 error = sooptcopyout(sopt, &rule_vers1, sizeof(struct ip_fw_compat));
3156 } else {
3157 error = sooptcopyout(sopt, rule, size);
3158 }
3159 }
3160 }
3161 }
3162
3163 _FREE(rule, M_TEMP);
3164 break;
3165
3166 case IP_FW_DEL:
3167 {
3168 /*
3169 * IP_FW_DEL is used for deleting single rules or sets,
3170 * and (ab)used to atomically manipulate sets.
3171 * rule->rulenum != 0 indicates single rule delete
3172 * rule->set_masks used to manipulate sets
3173 * rule->set_masks[0] contains info on sets to be
3174 * disabled, swapped, or moved
3175 * rule->set_masks[1] contains sets to be enabled.
3176 */
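/*
 * For illustration (hypothetical values): to disable set 3 and
 * enable set 7, the struct ip_fw passed in carries rulenum == 0,
 * a zero command byte, and
 *
 *	set_masks[0] = (1 << 3);	sets to disable
 *	set_masks[1] = (1 << 7);	sets to enable
 *
 * while set move/swap/delete requests pack set_masks[0] using the
 * encoding documented above del_entry().
 */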
3177
3178 /* there is only a simple rule passed in
3179 * (no cmds), so use a temp struct to copy
3180 */
3181 struct ip_fw temp_rule;
3182 u_int32_t arg;
3183 u_int8_t cmd;
3184
3185 bzero(&temp_rule, sizeof(struct ip_fw));
3186 if (api_version != IP_FW_CURRENT_API_VERSION) {
3187 error = ipfw_convert_to_latest(sopt, &temp_rule, api_version);
3188 }
3189 else {
3190 error = sooptcopyin(sopt, &temp_rule, sizeof(struct ip_fw),
3191 sizeof(struct ip_fw) );
3192 }
3193
3194 if (!error) {
3195 /* set_masks is used to distinguish between deleting
3196 * single rules or atomically manipulating sets
3197 */
3198 lck_mtx_lock(ipfw_mutex);
3199
3200 arg = temp_rule.set_masks[0];
3201 cmd = (arg >> 24) & 0xff;
3202
3203 if (temp_rule.rulenum) {
3204 /* single rule */
3205 error = del_entry(&layer3_chain, temp_rule.rulenum);
3206 #if DEBUG_INACTIVE_RULES
3207 print_chain(&layer3_chain);
3208 #endif
3209 }
3210 else if (cmd) {
3211 /* set reassignment - see comment above del_entry() for details */
3212 error = del_entry(&layer3_chain, temp_rule.set_masks[0]);
3213 #if DEBUG_INACTIVE_RULES
3214 print_chain(&layer3_chain);
3215 #endif
3216 }
3217 else if (temp_rule.set_masks[0] != 0 ||
3218 temp_rule.set_masks[1] != 0) {
3219 /* set enable/disable */
3220 set_disable =
3221 (set_disable | temp_rule.set_masks[0]) & ~temp_rule.set_masks[1] &
3222 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3223 }
3224
3225 if (!layer3_chain->next)
3226 fw_bypass = 1;
3227 lck_mtx_unlock(ipfw_mutex);
3228 }
3229 break;
3230 }
3231 case IP_FW_ZERO:
3232 case IP_FW_RESETLOG: /* using rule->rulenum */
3233 {
3234 /* there is only a simple rule passed in
3235 * (no cmds), so use a temp struct to copy
3236 */
3237 struct ip_fw temp_rule;
3238
3239 bzero(&temp_rule, sizeof(struct ip_fw));
3240
3241 if (api_version != IP_FW_CURRENT_API_VERSION) {
3242 error = ipfw_convert_to_latest(sopt, &temp_rule, api_version);
3243 }
3244 else {
3245 if (sopt->sopt_val != 0) {
3246 error = sooptcopyin(sopt, &temp_rule, sizeof(struct ip_fw),
3247 sizeof(struct ip_fw) );
3248 }
3249 }
3250
3251 if (!error) {
3252 lck_mtx_lock(ipfw_mutex);
3253 error = zero_entry(temp_rule.rulenum, sopt->sopt_name == IP_FW_RESETLOG);
3254 lck_mtx_unlock(ipfw_mutex);
3255 }
3256 break;
3257 }
3258 default:
3259 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
3260 error = EINVAL;
3261 }
3262
3263 if (error != EINVAL) {
3264 switch (command) {
3265 case IP_FW_ADD:
3266 case IP_OLD_FW_ADD:
3267 ipfw_kev_post_msg(KEV_IPFW_ADD);
3268 break;
3269 case IP_OLD_FW_DEL:
3270 case IP_FW_DEL:
3271 ipfw_kev_post_msg(KEV_IPFW_DEL);
3272 break;
3273 case IP_FW_FLUSH:
3274 case IP_OLD_FW_FLUSH:
3275 ipfw_kev_post_msg(KEV_IPFW_FLUSH);
3276 break;
3277
3278 default:
3279 break;
3280 }
3281 }
3282
3283 return (error);
3284 }
3285
3286 /**
3287 * dummynet needs a reference to the default rule, because rules can be
3288 * deleted while packets hold a reference to them. When this happens,
3289 * dummynet changes the reference to the default rule (it could well be a
3290 * NULL pointer, but this way we do not need to check for the special
3291 * case, plus here we have info on the default behaviour).
3292 */
3293 struct ip_fw *ip_fw_default_rule;
3294
3295 /*
3296 * This procedure is only used to handle keepalives. It is invoked
3297 * every dyn_keepalive_period seconds.
3298 */
3299 static void
3300 ipfw_tick(__unused void * unused)
3301 {
3302 int i;
3303 ipfw_dyn_rule *q;
3304 struct timeval timenow;
3305
3306
3307 if (dyn_keepalive == 0 || ipfw_dyn_v == NULL || dyn_count == 0)
3308 goto done;
3309
3310 getmicrotime(&timenow);
3311
3312 lck_mtx_lock(ipfw_mutex);
3313 for (i = 0 ; i < curr_dyn_buckets ; i++) {
3314 for (q = ipfw_dyn_v[i] ; q ; q = q->next ) {
3315 if (q->dyn_type == O_LIMIT_PARENT)
3316 continue;
3317 if (q->id.proto != IPPROTO_TCP)
3318 continue;
3319 if ( (q->state & BOTH_SYN) != BOTH_SYN)
3320 continue;
3321 if (TIME_LEQ( timenow.tv_sec+dyn_keepalive_interval,
3322 q->expire))
3323 continue; /* too early */
3324 if (TIME_LEQ(q->expire, timenow.tv_sec))
3325 continue; /* too late, rule expired */
3326
3327 send_pkt(&(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
3328 send_pkt(&(q->id), q->ack_fwd - 1, q->ack_rev, 0);
3329 }
3330 }
3331 lck_mtx_unlock(ipfw_mutex);
3332 done:
3333 timeout(ipfw_tick, NULL, dyn_keepalive_period*hz);
3334 }
3335
3336 void
3337 ipfw_init(void)
3338 {
3339 struct ip_fw default_rule;
3340
3341 /* setup locks */
3342 ipfw_mutex_grp_attr = lck_grp_attr_alloc_init();
3343 ipfw_mutex_grp = lck_grp_alloc_init("ipfw", ipfw_mutex_grp_attr);
3344 ipfw_mutex_attr = lck_attr_alloc_init();
3345
3346 if ((ipfw_mutex = lck_mtx_alloc_init(ipfw_mutex_grp, ipfw_mutex_attr)) == NULL) {
3347 printf("ipfw_init: can't alloc ipfw_mutex\n");
3348 return;
3349 }
3350
3351 layer3_chain = NULL;
3352
3353 bzero(&default_rule, sizeof default_rule);
3354
3355 default_rule.act_ofs = 0;
3356 default_rule.rulenum = IPFW_DEFAULT_RULE;
3357 default_rule.cmd_len = 1;
3358 default_rule.set = RESVD_SET;
3359
3360 default_rule.cmd[0].len = 1;
3361 default_rule.cmd[0].opcode =
3362 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
3363 1 ? O_ACCEPT :
3364 #endif
3365 O_DENY;
3366
3367 if (add_rule(&layer3_chain, &default_rule)) {
3368 printf("ipfw2: add_rule failed adding default rule\n");
3369 printf("ipfw2 failed initialization!!\n");
3370 fw_enable = 0;
3371 }
3372 else {
3373 ip_fw_default_rule = layer3_chain;
3374
3375 #ifdef IPFIREWALL_VERBOSE
3376 fw_verbose = 1;
3377 #endif
3378 #ifdef IPFIREWALL_VERBOSE_LIMIT
3379 verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
3380 #endif
3381 if (fw_verbose) {
3382 if (!verbose_limit)
3383 printf("ipfw2 verbose logging enabled: unlimited logging by default\n");
3384 else
3385 printf("ipfw2 verbose logging enabled: limited to %d packets/entry by default\n",
3386 verbose_limit);
3387 }
3388 }
3389
3390 ip_fw_chk_ptr = ipfw_chk;
3391 ip_fw_ctl_ptr = ipfw_ctl;
3392
3393 ipfwstringlen = strlen( ipfwstring );
3394
3395 timeout(ipfw_tick, NULL, hz);
3396 }
3397
3398 #endif /* IPFW2 */
3399