2 * Copyright (c) 2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 /* $apfw: pf_ioctl.c,v 1.16 2008/08/27 00:01:32 jhw Exp $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
81 #include <sys/mcache.h>
83 #include <mach/vm_param.h>
86 #include <net/if_types.h>
87 #include <net/route.h>
89 #include <netinet/in.h>
90 #include <netinet/in_var.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/ip_var.h>
94 #include <netinet/ip_icmp.h>
95 #include <netinet/if_ether.h>
97 #include <libkern/crypto/md5.h>
99 #include <miscfs/devfs/devfs.h>
101 #include <net/pfvar.h>
104 #include <net/if_pfsync.h>
108 #include <net/if_pflog.h>
112 #include <netinet/ip6.h>
113 #include <netinet/in_pcb.h>
117 #include <altq/altq.h>
121 static void pfdetach(void);
123 static int pfopen(dev_t
, int, int, struct proc
*);
124 static int pfclose(dev_t
, int, int, struct proc
*);
125 static int pfioctl(dev_t
, u_long
, caddr_t
, int, struct proc
*);
126 static struct pf_pool
*pf_get_pool(char *, u_int32_t
, u_int8_t
, u_int32_t
,
127 u_int8_t
, u_int8_t
, u_int8_t
);
129 static void pf_mv_pool(struct pf_palist
*, struct pf_palist
*);
130 static void pf_empty_pool(struct pf_palist
*);
132 static int pf_begin_altq(u_int32_t
*);
133 static int pf_rollback_altq(u_int32_t
);
134 static int pf_commit_altq(u_int32_t
);
135 static int pf_enable_altq(struct pf_altq
*);
136 static int pf_disable_altq(struct pf_altq
*);
138 static int pf_begin_rules(u_int32_t
*, int, const char *);
139 static int pf_rollback_rules(u_int32_t
, int, char *);
140 static int pf_setup_pfsync_matching(struct pf_ruleset
*);
141 static void pf_hash_rule(MD5_CTX
*, struct pf_rule
*);
142 #ifndef NO_APPLE_EXTENSIONS
143 static void pf_hash_rule_addr(MD5_CTX
*, struct pf_rule_addr
*, u_int8_t
);
145 static void pf_hash_rule_addr(MD5_CTX
*, struct pf_rule_addr
*);
147 static int pf_commit_rules(u_int32_t
, int, char *);
148 static void pf_state_export(struct pfsync_state
*, struct pf_state_key
*,
150 static void pf_state_import(struct pfsync_state
*, struct pf_state_key
*,
153 #define PF_CDEV_MAJOR (-1)
155 static struct cdevsw pf_cdevsw
= {
158 /* read */ eno_rdwrt
,
159 /* write */ eno_rdwrt
,
162 /* reset */ eno_reset
,
164 /* select */ eno_select
,
166 /* strategy */ eno_strat
,
172 static void pf_attach_hooks(void);
173 static void pf_detach_hooks(void);
174 static int pf_hooks_attached
= 0;
176 struct pf_rule pf_default_rule
;
178 static int pf_altq_running
;
181 #define TAGID_MAX 50000
182 static TAILQ_HEAD(pf_tags
, pf_tagname
) pf_tags
=
183 TAILQ_HEAD_INITIALIZER(pf_tags
);
185 static TAILQ_HEAD(pf_tags
, pf_tagname
) pf_qids
=
186 TAILQ_HEAD_INITIALIZER(pf_qids
);
189 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
190 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
192 static u_int16_t
tagname2tag(struct pf_tags
*, char *);
193 static void tag2tagname(struct pf_tags
*, u_int16_t
, char *);
194 static void tag_unref(struct pf_tags
*, u_int16_t
);
195 static int pf_rtlabel_add(struct pf_addr_wrap
*);
196 static void pf_rtlabel_remove(struct pf_addr_wrap
*);
197 static void pf_rtlabel_copyout(struct pf_addr_wrap
*);
200 static int pf_inet_hook(struct ifnet
*, struct mbuf
**, int);
203 static int pf_inet6_hook(struct ifnet
*, struct mbuf
**, int);
206 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
208 static lck_attr_t
*pf_perim_lock_attr
;
209 static lck_grp_t
*pf_perim_lock_grp
;
210 static lck_grp_attr_t
*pf_perim_lock_grp_attr
;
212 static lck_attr_t
*pf_lock_attr
;
213 static lck_grp_t
*pf_lock_grp
;
214 static lck_grp_attr_t
*pf_lock_grp_attr
;
216 struct thread
*pf_purge_thread
;
218 extern void pfi_kifaddr_update(void *);
223 u_int32_t
*t
= pf_default_rule
.timeout
;
226 pf_perim_lock_grp_attr
= lck_grp_attr_alloc_init();
227 pf_perim_lock_grp
= lck_grp_alloc_init("pf_perim",
228 pf_perim_lock_grp_attr
);
229 pf_perim_lock_attr
= lck_attr_alloc_init();
230 pf_perim_lock
= lck_rw_alloc_init(pf_perim_lock_grp
,
233 pf_lock_grp_attr
= lck_grp_attr_alloc_init();
234 pf_lock_grp
= lck_grp_alloc_init("pf", pf_lock_grp_attr
);
235 pf_lock_attr
= lck_attr_alloc_init();
236 pf_lock
= lck_mtx_alloc_init(pf_lock_grp
, pf_lock_attr
);
238 pool_init(&pf_rule_pl
, sizeof (struct pf_rule
), 0, 0, 0, "pfrulepl",
240 pool_init(&pf_src_tree_pl
, sizeof (struct pf_src_node
), 0, 0, 0,
242 pool_init(&pf_state_pl
, sizeof (struct pf_state
), 0, 0, 0, "pfstatepl",
244 pool_init(&pf_state_key_pl
, sizeof (struct pf_state_key
), 0, 0, 0,
245 "pfstatekeypl", NULL
);
246 #ifndef NO_APPLE_EXTENSIONS
247 pool_init(&pf_app_state_pl
, sizeof (struct pf_app_state
), 0, 0, 0,
248 "pfappstatepl", NULL
);
251 pool_init(&pf_altq_pl
, sizeof (struct pf_altq
), 0, 0, 0, "pfaltqpl",
254 pool_init(&pf_pooladdr_pl
, sizeof (struct pf_pooladdr
), 0, 0, 0,
255 "pfpooladdrpl", NULL
);
258 pf_osfp_initialize();
260 pool_sethardlimit(pf_pool_limits
[PF_LIMIT_STATES
].pp
,
261 pf_pool_limits
[PF_LIMIT_STATES
].limit
, NULL
, 0);
263 if (max_mem
<= 256*1024*1024)
264 pf_pool_limits
[PF_LIMIT_TABLE_ENTRIES
].limit
=
265 PFR_KENTRY_HIWAT_SMALL
;
267 RB_INIT(&tree_src_tracking
);
268 RB_INIT(&pf_anchors
);
269 pf_init_ruleset(&pf_main_ruleset
);
270 TAILQ_INIT(&pf_pabuf
);
271 TAILQ_INIT(&state_list
);
273 TAILQ_INIT(&pf_altqs
[0]);
274 TAILQ_INIT(&pf_altqs
[1]);
275 pf_altqs_active
= &pf_altqs
[0];
276 pf_altqs_inactive
= &pf_altqs
[1];
279 /* default rule should never be garbage collected */
280 pf_default_rule
.entries
.tqe_prev
= &pf_default_rule
.entries
.tqe_next
;
281 pf_default_rule
.action
= PF_PASS
;
282 pf_default_rule
.nr
= -1;
283 pf_default_rule
.rtableid
= IFSCOPE_NONE
;
285 /* initialize default timeouts */
286 t
[PFTM_TCP_FIRST_PACKET
] = PFTM_TCP_FIRST_PACKET_VAL
;
287 t
[PFTM_TCP_OPENING
] = PFTM_TCP_OPENING_VAL
;
288 t
[PFTM_TCP_ESTABLISHED
] = PFTM_TCP_ESTABLISHED_VAL
;
289 t
[PFTM_TCP_CLOSING
] = PFTM_TCP_CLOSING_VAL
;
290 t
[PFTM_TCP_FIN_WAIT
] = PFTM_TCP_FIN_WAIT_VAL
;
291 t
[PFTM_TCP_CLOSED
] = PFTM_TCP_CLOSED_VAL
;
292 t
[PFTM_UDP_FIRST_PACKET
] = PFTM_UDP_FIRST_PACKET_VAL
;
293 t
[PFTM_UDP_SINGLE
] = PFTM_UDP_SINGLE_VAL
;
294 t
[PFTM_UDP_MULTIPLE
] = PFTM_UDP_MULTIPLE_VAL
;
295 t
[PFTM_ICMP_FIRST_PACKET
] = PFTM_ICMP_FIRST_PACKET_VAL
;
296 t
[PFTM_ICMP_ERROR_REPLY
] = PFTM_ICMP_ERROR_REPLY_VAL
;
297 #ifndef NO_APPLE_EXTENSIONS
298 t
[PFTM_GREv1_FIRST_PACKET
] = PFTM_GREv1_FIRST_PACKET_VAL
;
299 t
[PFTM_GREv1_INITIATING
] = PFTM_GREv1_INITIATING_VAL
;
300 t
[PFTM_GREv1_ESTABLISHED
] = PFTM_GREv1_ESTABLISHED_VAL
;
301 t
[PFTM_ESP_FIRST_PACKET
] = PFTM_ESP_FIRST_PACKET_VAL
;
302 t
[PFTM_ESP_INITIATING
] = PFTM_ESP_INITIATING_VAL
;
303 t
[PFTM_ESP_ESTABLISHED
] = PFTM_ESP_ESTABLISHED_VAL
;
305 t
[PFTM_OTHER_FIRST_PACKET
] = PFTM_OTHER_FIRST_PACKET_VAL
;
306 t
[PFTM_OTHER_SINGLE
] = PFTM_OTHER_SINGLE_VAL
;
307 t
[PFTM_OTHER_MULTIPLE
] = PFTM_OTHER_MULTIPLE_VAL
;
308 t
[PFTM_FRAG
] = PFTM_FRAG_VAL
;
309 t
[PFTM_INTERVAL
] = PFTM_INTERVAL_VAL
;
310 t
[PFTM_SRC_NODE
] = PFTM_SRC_NODE_VAL
;
311 t
[PFTM_TS_DIFF
] = PFTM_TS_DIFF_VAL
;
312 t
[PFTM_ADAPTIVE_START
] = PFSTATE_ADAPT_START
;
313 t
[PFTM_ADAPTIVE_END
] = PFSTATE_ADAPT_END
;
316 bzero(&pf_status
, sizeof (pf_status
));
317 pf_status
.debug
= PF_DEBUG_URGENT
;
319 /* XXX do our best to avoid a conflict */
320 pf_status
.hostid
= random();
322 if (kernel_thread_start(pf_purge_thread_fn
, NULL
,
323 &pf_purge_thread
) != 0) {
324 printf("%s: unable to start purge thread!", __func__
);
328 maj
= cdevsw_add(PF_CDEV_MAJOR
, &pf_cdevsw
);
330 printf("%s: failed to allocate major number!\n", __func__
);
333 (void) devfs_make_node(makedev(maj
, 0), DEVFS_CHAR
,
334 UID_ROOT
, GID_WHEEL
, 0600, "pf", 0);
341 struct pf_anchor
*anchor
;
342 struct pf_state
*state
;
343 struct pf_src_node
*node
;
344 struct pfioc_table pt
;
349 pf_status
.running
= 0;
350 wakeup(pf_purge_thread_fn
);
352 /* clear the rulesets */
353 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
354 if (pf_begin_rules(&ticket
, i
, &r
) == 0)
355 pf_commit_rules(ticket
, i
, &r
);
357 if (pf_begin_altq(&ticket
) == 0)
358 pf_commit_altq(ticket
);
362 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
363 state
->timeout
= PFTM_PURGE
;
365 state
->sync_flags
= PFSTATE_NOSYNC
;
368 pf_purge_expired_states(pf_status
.states
);
371 pfsync_clear_states(pf_status
.hostid
, NULL
);
374 /* clear source nodes */
375 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
376 state
->src_node
= NULL
;
377 state
->nat_src_node
= NULL
;
379 RB_FOREACH(node
, pf_src_tree
, &tree_src_tracking
) {
383 pf_purge_expired_src_nodes();
386 memset(&pt
, '\0', sizeof (pt
));
387 pfr_clr_tables(&pt
.pfrio_table
, &pt
.pfrio_ndel
, pt
.pfrio_flags
);
389 /* destroy anchors */
390 while ((anchor
= RB_MIN(pf_anchor_global
, &pf_anchors
)) != NULL
) {
391 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
392 if (pf_begin_rules(&ticket
, i
, anchor
->name
) == 0)
393 pf_commit_rules(ticket
, i
, anchor
->name
);
396 /* destroy main ruleset */
397 pf_remove_if_empty_ruleset(&pf_main_ruleset
);
399 /* destroy the pools */
400 pool_destroy(&pf_pooladdr_pl
);
402 pool_destroy(&pf_altq_pl
);
404 pool_destroy(&pf_state_pl
);
405 pool_destroy(&pf_rule_pl
);
406 pool_destroy(&pf_src_tree_pl
);
408 /* destroy subsystems */
409 pf_normalize_destroy();
417 pfopen(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
419 #pragma unused(flags, fmt, p)
426 pfclose(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
428 #pragma unused(flags, fmt, p)
434 static struct pf_pool
*
435 pf_get_pool(char *anchor
, u_int32_t ticket
, u_int8_t rule_action
,
436 u_int32_t rule_number
, u_int8_t r_last
, u_int8_t active
,
437 u_int8_t check_ticket
)
439 struct pf_ruleset
*ruleset
;
440 struct pf_rule
*rule
;
443 ruleset
= pf_find_ruleset(anchor
);
446 rs_num
= pf_get_ruleset_number(rule_action
);
447 if (rs_num
>= PF_RULESET_MAX
)
450 if (check_ticket
&& ticket
!=
451 ruleset
->rules
[rs_num
].active
.ticket
)
454 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
457 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
459 if (check_ticket
&& ticket
!=
460 ruleset
->rules
[rs_num
].inactive
.ticket
)
463 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
466 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].inactive
.ptr
);
469 while ((rule
!= NULL
) && (rule
->nr
!= rule_number
))
470 rule
= TAILQ_NEXT(rule
, entries
);
475 return (&rule
->rpool
);
479 pf_mv_pool(struct pf_palist
*poola
, struct pf_palist
*poolb
)
481 struct pf_pooladdr
*mv_pool_pa
;
483 while ((mv_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
484 TAILQ_REMOVE(poola
, mv_pool_pa
, entries
);
485 TAILQ_INSERT_TAIL(poolb
, mv_pool_pa
, entries
);
490 pf_empty_pool(struct pf_palist
*poola
)
492 struct pf_pooladdr
*empty_pool_pa
;
494 while ((empty_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
495 pfi_dynaddr_remove(&empty_pool_pa
->addr
);
496 pf_tbladdr_remove(&empty_pool_pa
->addr
);
497 pfi_kif_unref(empty_pool_pa
->kif
, PFI_KIF_REF_RULE
);
498 TAILQ_REMOVE(poola
, empty_pool_pa
, entries
);
499 pool_put(&pf_pooladdr_pl
, empty_pool_pa
);
504 pf_rm_rule(struct pf_rulequeue
*rulequeue
, struct pf_rule
*rule
)
506 if (rulequeue
!= NULL
) {
507 if (rule
->states
<= 0) {
509 * XXX - we need to remove the table *before* detaching
510 * the rule to make sure the table code does not delete
511 * the anchor under our feet.
513 pf_tbladdr_remove(&rule
->src
.addr
);
514 pf_tbladdr_remove(&rule
->dst
.addr
);
515 if (rule
->overload_tbl
)
516 pfr_detach_table(rule
->overload_tbl
);
518 TAILQ_REMOVE(rulequeue
, rule
, entries
);
519 rule
->entries
.tqe_prev
= NULL
;
523 if (rule
->states
> 0 || rule
->src_nodes
> 0 ||
524 rule
->entries
.tqe_prev
!= NULL
)
526 pf_tag_unref(rule
->tag
);
527 pf_tag_unref(rule
->match_tag
);
529 if (rule
->pqid
!= rule
->qid
)
530 pf_qid_unref(rule
->pqid
);
531 pf_qid_unref(rule
->qid
);
533 pf_rtlabel_remove(&rule
->src
.addr
);
534 pf_rtlabel_remove(&rule
->dst
.addr
);
535 pfi_dynaddr_remove(&rule
->src
.addr
);
536 pfi_dynaddr_remove(&rule
->dst
.addr
);
537 if (rulequeue
== NULL
) {
538 pf_tbladdr_remove(&rule
->src
.addr
);
539 pf_tbladdr_remove(&rule
->dst
.addr
);
540 if (rule
->overload_tbl
)
541 pfr_detach_table(rule
->overload_tbl
);
543 pfi_kif_unref(rule
->kif
, PFI_KIF_REF_RULE
);
544 pf_anchor_remove(rule
);
545 pf_empty_pool(&rule
->rpool
.list
);
546 pool_put(&pf_rule_pl
, rule
);
550 tagname2tag(struct pf_tags
*head
, char *tagname
)
552 struct pf_tagname
*tag
, *p
= NULL
;
553 u_int16_t new_tagid
= 1;
555 TAILQ_FOREACH(tag
, head
, entries
)
556 if (strcmp(tagname
, tag
->name
) == 0) {
562 * to avoid fragmentation, we do a linear search from the beginning
563 * and take the first free slot we find. if there is none or the list
564 * is empty, append a new entry at the end.
568 if (!TAILQ_EMPTY(head
))
569 for (p
= TAILQ_FIRST(head
); p
!= NULL
&&
570 p
->tag
== new_tagid
; p
= TAILQ_NEXT(p
, entries
))
571 new_tagid
= p
->tag
+ 1;
573 if (new_tagid
> TAGID_MAX
)
576 /* allocate and fill new struct pf_tagname */
577 tag
= _MALLOC(sizeof (*tag
), M_TEMP
, M_WAITOK
|M_ZERO
);
580 strlcpy(tag
->name
, tagname
, sizeof (tag
->name
));
581 tag
->tag
= new_tagid
;
584 if (p
!= NULL
) /* insert new entry before p */
585 TAILQ_INSERT_BEFORE(p
, tag
, entries
);
586 else /* either list empty or no free slot in between */
587 TAILQ_INSERT_TAIL(head
, tag
, entries
);
593 tag2tagname(struct pf_tags
*head
, u_int16_t tagid
, char *p
)
595 struct pf_tagname
*tag
;
597 TAILQ_FOREACH(tag
, head
, entries
)
598 if (tag
->tag
== tagid
) {
599 strlcpy(p
, tag
->name
, PF_TAG_NAME_SIZE
);
605 tag_unref(struct pf_tags
*head
, u_int16_t tag
)
607 struct pf_tagname
*p
, *next
;
612 for (p
= TAILQ_FIRST(head
); p
!= NULL
; p
= next
) {
613 next
= TAILQ_NEXT(p
, entries
);
616 TAILQ_REMOVE(head
, p
, entries
);
625 pf_tagname2tag(char *tagname
)
627 return (tagname2tag(&pf_tags
, tagname
));
631 pf_tag2tagname(u_int16_t tagid
, char *p
)
633 tag2tagname(&pf_tags
, tagid
, p
);
637 pf_tag_ref(u_int16_t tag
)
639 struct pf_tagname
*t
;
641 TAILQ_FOREACH(t
, &pf_tags
, entries
)
649 pf_tag_unref(u_int16_t tag
)
651 tag_unref(&pf_tags
, tag
);
655 pf_rtlabel_add(struct pf_addr_wrap
*a
)
662 pf_rtlabel_remove(struct pf_addr_wrap
*a
)
668 pf_rtlabel_copyout(struct pf_addr_wrap
*a
)
675 pf_qname2qid(char *qname
)
677 return ((u_int32_t
)tagname2tag(&pf_qids
, qname
));
681 pf_qid2qname(u_int32_t qid
, char *p
)
683 tag2tagname(&pf_qids
, (u_int16_t
)qid
, p
);
687 pf_qid_unref(u_int32_t qid
)
689 tag_unref(&pf_qids
, (u_int16_t
)qid
);
693 pf_begin_altq(u_int32_t
*ticket
)
695 struct pf_altq
*altq
;
698 /* Purge the old altq list */
699 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
700 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
701 if (altq
->qname
[0] == 0) {
702 /* detach and destroy the discipline */
703 error
= altq_remove(altq
);
705 pf_qid_unref(altq
->qid
);
706 pool_put(&pf_altq_pl
, altq
);
710 *ticket
= ++ticket_altqs_inactive
;
711 altqs_inactive_open
= 1;
716 pf_rollback_altq(u_int32_t ticket
)
718 struct pf_altq
*altq
;
721 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
723 /* Purge the old altq list */
724 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
725 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
726 if (altq
->qname
[0] == 0) {
727 /* detach and destroy the discipline */
728 error
= altq_remove(altq
);
730 pf_qid_unref(altq
->qid
);
731 pool_put(&pf_altq_pl
, altq
);
733 altqs_inactive_open
= 0;
738 pf_commit_altq(u_int32_t ticket
)
740 struct pf_altqqueue
*old_altqs
;
741 struct pf_altq
*altq
;
742 int s
, err
, error
= 0;
744 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
747 /* swap altqs, keep the old. */
749 old_altqs
= pf_altqs_active
;
750 pf_altqs_active
= pf_altqs_inactive
;
751 pf_altqs_inactive
= old_altqs
;
752 ticket_altqs_active
= ticket_altqs_inactive
;
754 /* Attach new disciplines */
755 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
756 if (altq
->qname
[0] == 0) {
757 /* attach the discipline */
758 error
= altq_pfattach(altq
);
759 if (error
== 0 && pf_altq_running
)
760 error
= pf_enable_altq(altq
);
768 /* Purge the old altq list */
769 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
770 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
771 if (altq
->qname
[0] == 0) {
772 /* detach and destroy the discipline */
774 error
= pf_disable_altq(altq
);
775 err
= altq_pfdetach(altq
);
776 if (err
!= 0 && error
== 0)
778 err
= altq_remove(altq
);
779 if (err
!= 0 && error
== 0)
782 pf_qid_unref(altq
->qid
);
783 pool_put(&pf_altq_pl
, altq
);
787 altqs_inactive_open
= 0;
792 pf_enable_altq(struct pf_altq
*altq
)
795 struct tb_profile tb
;
798 if ((ifp
= ifunit(altq
->ifname
)) == NULL
)
801 if (ifp
->if_snd
.altq_type
!= ALTQT_NONE
)
802 error
= altq_enable(&ifp
->if_snd
);
804 /* set tokenbucket regulator */
805 if (error
== 0 && ifp
!= NULL
&& ALTQ_IS_ENABLED(&ifp
->if_snd
)) {
806 tb
.rate
= altq
->ifbandwidth
;
807 tb
.depth
= altq
->tbrsize
;
809 error
= tbr_set(&ifp
->if_snd
, &tb
);
817 pf_disable_altq(struct pf_altq
*altq
)
820 struct tb_profile tb
;
823 if ((ifp
= ifunit(altq
->ifname
)) == NULL
)
827 * when the discipline is no longer referenced, it was overridden
828 * by a new one. if so, just return.
830 if (altq
->altq_disc
!= ifp
->if_snd
.altq_disc
)
833 error
= altq_disable(&ifp
->if_snd
);
836 /* clear tokenbucket regulator */
839 error
= tbr_set(&ifp
->if_snd
, &tb
);
848 pf_begin_rules(u_int32_t
*ticket
, int rs_num
, const char *anchor
)
850 struct pf_ruleset
*rs
;
851 struct pf_rule
*rule
;
853 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
855 rs
= pf_find_or_create_ruleset(anchor
);
858 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
859 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
860 rs
->rules
[rs_num
].inactive
.rcount
--;
862 *ticket
= ++rs
->rules
[rs_num
].inactive
.ticket
;
863 rs
->rules
[rs_num
].inactive
.open
= 1;
868 pf_rollback_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
870 struct pf_ruleset
*rs
;
871 struct pf_rule
*rule
;
873 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
875 rs
= pf_find_ruleset(anchor
);
876 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
877 rs
->rules
[rs_num
].inactive
.ticket
!= ticket
)
879 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
880 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
881 rs
->rules
[rs_num
].inactive
.rcount
--;
883 rs
->rules
[rs_num
].inactive
.open
= 0;
887 #define PF_MD5_UPD(st, elm) \
888 MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))
890 #define PF_MD5_UPD_STR(st, elm) \
891 MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))
893 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
894 (stor) = htonl((st)->elm); \
895 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
898 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
899 (stor) = htons((st)->elm); \
900 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
903 #ifndef NO_APPLE_EXTENSIONS
905 pf_hash_rule_addr(MD5_CTX
*ctx
, struct pf_rule_addr
*pfr
, u_int8_t proto
)
908 pf_hash_rule_addr(MD5_CTX
*ctx
, struct pf_rule_addr
*pfr
)
911 PF_MD5_UPD(pfr
, addr
.type
);
912 switch (pfr
->addr
.type
) {
913 case PF_ADDR_DYNIFTL
:
914 PF_MD5_UPD(pfr
, addr
.v
.ifname
);
915 PF_MD5_UPD(pfr
, addr
.iflags
);
918 PF_MD5_UPD(pfr
, addr
.v
.tblname
);
920 case PF_ADDR_ADDRMASK
:
922 PF_MD5_UPD(pfr
, addr
.v
.a
.addr
.addr32
);
923 PF_MD5_UPD(pfr
, addr
.v
.a
.mask
.addr32
);
925 case PF_ADDR_RTLABEL
:
926 PF_MD5_UPD(pfr
, addr
.v
.rtlabelname
);
930 #ifndef NO_APPLE_EXTENSIONS
934 PF_MD5_UPD(pfr
, xport
.range
.port
[0]);
935 PF_MD5_UPD(pfr
, xport
.range
.port
[1]);
936 PF_MD5_UPD(pfr
, xport
.range
.op
);
943 PF_MD5_UPD(pfr
, neg
);
945 PF_MD5_UPD(pfr
, port
[0]);
946 PF_MD5_UPD(pfr
, port
[1]);
947 PF_MD5_UPD(pfr
, neg
);
948 PF_MD5_UPD(pfr
, port_op
);
953 pf_hash_rule(MD5_CTX
*ctx
, struct pf_rule
*rule
)
958 #ifndef NO_APPLE_EXTENSIONS
959 pf_hash_rule_addr(ctx
, &rule
->src
, rule
->proto
);
960 pf_hash_rule_addr(ctx
, &rule
->dst
, rule
->proto
);
962 pf_hash_rule_addr(ctx
, &rule
->src
);
963 pf_hash_rule_addr(ctx
, &rule
->dst
);
965 PF_MD5_UPD_STR(rule
, label
);
966 PF_MD5_UPD_STR(rule
, ifname
);
967 PF_MD5_UPD_STR(rule
, match_tagname
);
968 PF_MD5_UPD_HTONS(rule
, match_tag
, x
); /* dup? */
969 PF_MD5_UPD_HTONL(rule
, os_fingerprint
, y
);
970 PF_MD5_UPD_HTONL(rule
, prob
, y
);
971 PF_MD5_UPD_HTONL(rule
, uid
.uid
[0], y
);
972 PF_MD5_UPD_HTONL(rule
, uid
.uid
[1], y
);
973 PF_MD5_UPD(rule
, uid
.op
);
974 PF_MD5_UPD_HTONL(rule
, gid
.gid
[0], y
);
975 PF_MD5_UPD_HTONL(rule
, gid
.gid
[1], y
);
976 PF_MD5_UPD(rule
, gid
.op
);
977 PF_MD5_UPD_HTONL(rule
, rule_flag
, y
);
978 PF_MD5_UPD(rule
, action
);
979 PF_MD5_UPD(rule
, direction
);
980 PF_MD5_UPD(rule
, af
);
981 PF_MD5_UPD(rule
, quick
);
982 PF_MD5_UPD(rule
, ifnot
);
983 PF_MD5_UPD(rule
, match_tag_not
);
984 PF_MD5_UPD(rule
, natpass
);
985 PF_MD5_UPD(rule
, keep_state
);
986 PF_MD5_UPD(rule
, proto
);
987 PF_MD5_UPD(rule
, type
);
988 PF_MD5_UPD(rule
, code
);
989 PF_MD5_UPD(rule
, flags
);
990 PF_MD5_UPD(rule
, flagset
);
991 PF_MD5_UPD(rule
, allow_opts
);
992 PF_MD5_UPD(rule
, rt
);
993 PF_MD5_UPD(rule
, tos
);
997 pf_commit_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
999 struct pf_ruleset
*rs
;
1000 struct pf_rule
*rule
, **old_array
;
1001 struct pf_rulequeue
*old_rules
;
1003 u_int32_t old_rcount
;
1005 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1007 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
1009 rs
= pf_find_ruleset(anchor
);
1010 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
1011 ticket
!= rs
->rules
[rs_num
].inactive
.ticket
)
1014 /* Calculate checksum for the main ruleset */
1015 if (rs
== &pf_main_ruleset
) {
1016 error
= pf_setup_pfsync_matching(rs
);
1021 /* Swap rules, keep the old. */
1022 old_rules
= rs
->rules
[rs_num
].active
.ptr
;
1023 old_rcount
= rs
->rules
[rs_num
].active
.rcount
;
1024 old_array
= rs
->rules
[rs_num
].active
.ptr_array
;
1026 rs
->rules
[rs_num
].active
.ptr
=
1027 rs
->rules
[rs_num
].inactive
.ptr
;
1028 rs
->rules
[rs_num
].active
.ptr_array
=
1029 rs
->rules
[rs_num
].inactive
.ptr_array
;
1030 rs
->rules
[rs_num
].active
.rcount
=
1031 rs
->rules
[rs_num
].inactive
.rcount
;
1032 rs
->rules
[rs_num
].inactive
.ptr
= old_rules
;
1033 rs
->rules
[rs_num
].inactive
.ptr_array
= old_array
;
1034 rs
->rules
[rs_num
].inactive
.rcount
= old_rcount
;
1036 rs
->rules
[rs_num
].active
.ticket
=
1037 rs
->rules
[rs_num
].inactive
.ticket
;
1038 pf_calc_skip_steps(rs
->rules
[rs_num
].active
.ptr
);
1041 /* Purge the old rule list. */
1042 while ((rule
= TAILQ_FIRST(old_rules
)) != NULL
)
1043 pf_rm_rule(old_rules
, rule
);
1044 if (rs
->rules
[rs_num
].inactive
.ptr_array
)
1045 _FREE(rs
->rules
[rs_num
].inactive
.ptr_array
, M_TEMP
);
1046 rs
->rules
[rs_num
].inactive
.ptr_array
= NULL
;
1047 rs
->rules
[rs_num
].inactive
.rcount
= 0;
1048 rs
->rules
[rs_num
].inactive
.open
= 0;
1049 pf_remove_if_empty_ruleset(rs
);
1054 pf_state_export(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1057 uint64_t secs
= pf_time_second();
1058 bzero(sp
, sizeof (struct pfsync_state
));
1060 /* copy from state key */
1061 #ifndef NO_APPLE_EXTENSIONS
1062 sp
->lan
.addr
= sk
->lan
.addr
;
1063 sp
->lan
.xport
= sk
->lan
.xport
;
1064 sp
->gwy
.addr
= sk
->gwy
.addr
;
1065 sp
->gwy
.xport
= sk
->gwy
.xport
;
1066 sp
->ext
.addr
= sk
->ext
.addr
;
1067 sp
->ext
.xport
= sk
->ext
.xport
;
1068 sp
->proto_variant
= sk
->proto_variant
;
1071 sp
->lan
.addr
= sk
->lan
.addr
;
1072 sp
->lan
.port
= sk
->lan
.port
;
1073 sp
->gwy
.addr
= sk
->gwy
.addr
;
1074 sp
->gwy
.port
= sk
->gwy
.port
;
1075 sp
->ext
.addr
= sk
->ext
.addr
;
1076 sp
->ext
.port
= sk
->ext
.port
;
1078 sp
->proto
= sk
->proto
;
1080 sp
->direction
= sk
->direction
;
1082 /* copy from state */
1083 memcpy(&sp
->id
, &s
->id
, sizeof (sp
->id
));
1084 sp
->creatorid
= s
->creatorid
;
1085 strlcpy(sp
->ifname
, s
->kif
->pfik_name
, sizeof (sp
->ifname
));
1086 pf_state_peer_to_pfsync(&s
->src
, &sp
->src
);
1087 pf_state_peer_to_pfsync(&s
->dst
, &sp
->dst
);
1089 sp
->rule
= s
->rule
.ptr
->nr
;
1090 sp
->nat_rule
= (s
->nat_rule
.ptr
== NULL
) ?
1091 (unsigned)-1 : s
->nat_rule
.ptr
->nr
;
1092 sp
->anchor
= (s
->anchor
.ptr
== NULL
) ?
1093 (unsigned)-1 : s
->anchor
.ptr
->nr
;
1095 pf_state_counter_to_pfsync(s
->bytes
[0], sp
->bytes
[0]);
1096 pf_state_counter_to_pfsync(s
->bytes
[1], sp
->bytes
[1]);
1097 pf_state_counter_to_pfsync(s
->packets
[0], sp
->packets
[0]);
1098 pf_state_counter_to_pfsync(s
->packets
[1], sp
->packets
[1]);
1099 sp
->creation
= secs
- s
->creation
;
1100 sp
->expire
= pf_state_expires(s
);
1102 sp
->allow_opts
= s
->allow_opts
;
1103 sp
->timeout
= s
->timeout
;
1106 sp
->sync_flags
|= PFSYNC_FLAG_SRCNODE
;
1107 if (s
->nat_src_node
)
1108 sp
->sync_flags
|= PFSYNC_FLAG_NATSRCNODE
;
1110 if (sp
->expire
> secs
)
1118 pf_state_import(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1121 /* copy to state key */
1122 #ifndef NO_APPLE_EXTENSIONS
1123 sk
->lan
.addr
= sp
->lan
.addr
;
1124 sk
->lan
.xport
= sp
->lan
.xport
;
1125 sk
->gwy
.addr
= sp
->gwy
.addr
;
1126 sk
->gwy
.xport
= sp
->gwy
.xport
;
1127 sk
->ext
.addr
= sp
->ext
.addr
;
1128 sk
->ext
.xport
= sp
->ext
.xport
;
1129 sk
->proto_variant
= sp
->proto_variant
;
1132 sk
->lan
.addr
= sp
->lan
.addr
;
1133 sk
->lan
.port
= sp
->lan
.port
;
1134 sk
->gwy
.addr
= sp
->gwy
.addr
;
1135 sk
->gwy
.port
= sp
->gwy
.port
;
1136 sk
->ext
.addr
= sp
->ext
.addr
;
1137 sk
->ext
.port
= sp
->ext
.port
;
1139 sk
->proto
= sp
->proto
;
1141 sk
->direction
= sp
->direction
;
1144 memcpy(&s
->id
, &sp
->id
, sizeof (sp
->id
));
1145 s
->creatorid
= sp
->creatorid
;
1146 pf_state_peer_from_pfsync(&sp
->src
, &s
->src
);
1147 pf_state_peer_from_pfsync(&sp
->dst
, &s
->dst
);
1149 s
->rule
.ptr
= &pf_default_rule
;
1150 s
->nat_rule
.ptr
= NULL
;
1151 s
->anchor
.ptr
= NULL
;
1153 s
->creation
= pf_time_second();
1154 s
->expire
= pf_time_second();
1156 s
->expire
-= pf_default_rule
.timeout
[sp
->timeout
] - sp
->expire
;
1158 s
->packets
[0] = s
->packets
[1] = 0;
1159 s
->bytes
[0] = s
->bytes
[1] = 0;
1163 pf_setup_pfsync_matching(struct pf_ruleset
*rs
)
1166 struct pf_rule
*rule
;
1168 u_int8_t digest
[PF_MD5_DIGEST_LENGTH
];
1171 for (rs_cnt
= 0; rs_cnt
< PF_RULESET_MAX
; rs_cnt
++) {
1172 /* XXX PF_RULESET_SCRUB as well? */
1173 if (rs_cnt
== PF_RULESET_SCRUB
)
1176 if (rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1177 _FREE(rs
->rules
[rs_cnt
].inactive
.ptr_array
, M_TEMP
);
1178 rs
->rules
[rs_cnt
].inactive
.ptr_array
= NULL
;
1180 if (rs
->rules
[rs_cnt
].inactive
.rcount
) {
1181 rs
->rules
[rs_cnt
].inactive
.ptr_array
=
1182 _MALLOC(sizeof (caddr_t
) *
1183 rs
->rules
[rs_cnt
].inactive
.rcount
,
1186 if (!rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1190 TAILQ_FOREACH(rule
, rs
->rules
[rs_cnt
].inactive
.ptr
,
1192 pf_hash_rule(&ctx
, rule
);
1193 (rs
->rules
[rs_cnt
].inactive
.ptr_array
)[rule
->nr
] = rule
;
1197 MD5Final(digest
, &ctx
);
1198 memcpy(pf_status
.pf_chksum
, digest
, sizeof (pf_status
.pf_chksum
));
1203 pfioctl(dev_t dev
, u_long cmd
, caddr_t addr
, int flags
, struct proc
*p
)
1206 struct pf_pooladdr
*pa
= NULL
;
1207 struct pf_pool
*pool
= NULL
;
1210 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1213 /* XXX keep in sync with switch() below */
1214 if (securelevel
> 1)
1221 case DIOCSETSTATUSIF
:
1227 case DIOCGETTIMEOUT
:
1228 case DIOCCLRRULECTRS
:
1233 case DIOCGETRULESETS
:
1234 case DIOCGETRULESET
:
1235 case DIOCRGETTABLES
:
1236 case DIOCRGETTSTATS
:
1237 case DIOCRCLRTSTATS
:
1243 case DIOCRGETASTATS
:
1244 case DIOCRCLRASTATS
:
1247 case DIOCGETSRCNODES
:
1248 case DIOCCLRSRCNODES
:
1249 case DIOCIGETIFACES
:
1253 case DIOCRCLRTABLES
:
1254 case DIOCRADDTABLES
:
1255 case DIOCRDELTABLES
:
1256 case DIOCRSETTFLAGS
:
1257 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
1259 break; /* dummy operation ok */
1265 if (!(flags
& FWRITE
))
1275 case DIOCGETTIMEOUT
:
1280 case DIOCGETRULESETS
:
1281 case DIOCGETRULESET
:
1283 case DIOCRGETTABLES
:
1284 case DIOCRGETTSTATS
:
1286 case DIOCRGETASTATS
:
1289 case DIOCGETSRCNODES
:
1290 case DIOCIGETIFACES
:
1292 case DIOCRCLRTABLES
:
1293 case DIOCRADDTABLES
:
1294 case DIOCRDELTABLES
:
1295 case DIOCRCLRTSTATS
:
1300 case DIOCRSETTFLAGS
:
1301 if (((struct pfioc_table
*)addr
)->pfrio_flags
&
1303 flags
|= FWRITE
; /* need write lock for dummy */
1304 break; /* dummy operation ok */
1308 if (((struct pfioc_rule
*)addr
)->action
==
1317 lck_rw_lock_exclusive(pf_perim_lock
);
1319 lck_rw_lock_shared(pf_perim_lock
);
1321 lck_mtx_lock(pf_lock
);
1326 if (pf_status
.running
) {
1328 } else if (pf_purge_thread
== NULL
) {
1331 pf_status
.running
= 1;
1332 pf_status
.since
= pf_calendar_time_second();
1333 if (pf_status
.stateid
== 0) {
1334 pf_status
.stateid
= pf_time_second();
1335 pf_status
.stateid
= pf_status
.stateid
<< 32;
1337 mbuf_growth_aggressive();
1339 wakeup(pf_purge_thread_fn
);
1340 DPFPRINTF(PF_DEBUG_MISC
, ("pf: started\n"));
1345 if (!pf_status
.running
) {
1348 mbuf_growth_normal();
1350 pf_status
.running
= 0;
1351 pf_status
.since
= pf_calendar_time_second();
1352 wakeup(pf_purge_thread_fn
);
1353 DPFPRINTF(PF_DEBUG_MISC
, ("pf: stopped\n"));
1358 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1359 struct pf_ruleset
*ruleset
;
1360 struct pf_rule
*rule
, *tail
;
1361 struct pf_pooladdr
*apa
;
1364 pr
->anchor
[sizeof (pr
->anchor
) - 1] = 0;
1365 ruleset
= pf_find_ruleset(pr
->anchor
);
1366 if (ruleset
== NULL
) {
1370 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1371 if (rs_num
>= PF_RULESET_MAX
) {
1375 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1379 if (pr
->ticket
!= ruleset
->rules
[rs_num
].inactive
.ticket
) {
1383 if (pr
->pool_ticket
!= ticket_pabuf
) {
1387 rule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
1392 bcopy(&pr
->rule
, rule
, sizeof (struct pf_rule
));
1393 rule
->cuid
= kauth_cred_getuid(p
->p_ucred
);
1394 rule
->cpid
= p
->p_pid
;
1395 rule
->anchor
= NULL
;
1397 TAILQ_INIT(&rule
->rpool
.list
);
1398 /* initialize refcounting */
1400 rule
->src_nodes
= 0;
1401 rule
->entries
.tqe_prev
= NULL
;
1403 if (rule
->af
== AF_INET
) {
1404 pool_put(&pf_rule_pl
, rule
);
1405 error
= EAFNOSUPPORT
;
1410 if (rule
->af
== AF_INET6
) {
1411 pool_put(&pf_rule_pl
, rule
);
1412 error
= EAFNOSUPPORT
;
1416 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
1419 rule
->nr
= tail
->nr
+ 1;
1422 if (rule
->ifname
[0]) {
1423 rule
->kif
= pfi_kif_get(rule
->ifname
);
1424 if (rule
->kif
== NULL
) {
1425 pool_put(&pf_rule_pl
, rule
);
1429 pfi_kif_ref(rule
->kif
, PFI_KIF_REF_RULE
);
1434 if (rule
->qname
[0] != 0) {
1435 if ((rule
->qid
= pf_qname2qid(rule
->qname
)) == 0)
1437 else if (rule
->pqname
[0] != 0) {
1439 pf_qname2qid(rule
->pqname
)) == 0)
1442 rule
->pqid
= rule
->qid
;
1445 if (rule
->tagname
[0])
1446 if ((rule
->tag
= pf_tagname2tag(rule
->tagname
)) == 0)
1448 if (rule
->match_tagname
[0])
1449 if ((rule
->match_tag
=
1450 pf_tagname2tag(rule
->match_tagname
)) == 0)
1452 if (rule
->rt
&& !rule
->direction
)
1457 if (rule
->logif
>= PFLOGIFS_MAX
)
1460 if (pf_rtlabel_add(&rule
->src
.addr
) ||
1461 pf_rtlabel_add(&rule
->dst
.addr
))
1463 if (pfi_dynaddr_setup(&rule
->src
.addr
, rule
->af
))
1465 if (pfi_dynaddr_setup(&rule
->dst
.addr
, rule
->af
))
1467 if (pf_tbladdr_setup(ruleset
, &rule
->src
.addr
))
1469 if (pf_tbladdr_setup(ruleset
, &rule
->dst
.addr
))
1471 if (pf_anchor_setup(rule
, ruleset
, pr
->anchor_call
))
1473 TAILQ_FOREACH(apa
, &pf_pabuf
, entries
)
1474 if (pf_tbladdr_setup(ruleset
, &apa
->addr
))
1477 if (rule
->overload_tblname
[0]) {
1478 if ((rule
->overload_tbl
= pfr_attach_table(ruleset
,
1479 rule
->overload_tblname
)) == NULL
)
1482 rule
->overload_tbl
->pfrkt_flags
|=
1486 pf_mv_pool(&pf_pabuf
, &rule
->rpool
.list
);
1487 if (((((rule
->action
== PF_NAT
) || (rule
->action
== PF_RDR
) ||
1488 (rule
->action
== PF_BINAT
)) && rule
->anchor
== NULL
) ||
1489 (rule
->rt
> PF_FASTROUTE
)) &&
1490 (TAILQ_FIRST(&rule
->rpool
.list
) == NULL
))
1494 pf_rm_rule(NULL
, rule
);
1497 rule
->rpool
.cur
= TAILQ_FIRST(&rule
->rpool
.list
);
1498 rule
->evaluations
= rule
->packets
[0] = rule
->packets
[1] =
1499 rule
->bytes
[0] = rule
->bytes
[1] = 0;
1500 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].inactive
.ptr
,
1502 ruleset
->rules
[rs_num
].inactive
.rcount
++;
1506 case DIOCGETRULES
: {
1507 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1508 struct pf_ruleset
*ruleset
;
1509 struct pf_rule
*tail
;
1512 pr
->anchor
[sizeof (pr
->anchor
) - 1] = 0;
1513 ruleset
= pf_find_ruleset(pr
->anchor
);
1514 if (ruleset
== NULL
) {
1518 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1519 if (rs_num
>= PF_RULESET_MAX
) {
1523 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
1526 pr
->nr
= tail
->nr
+ 1;
1529 pr
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
1534 struct pfioc_rule
*pr
= (struct pfioc_rule
*)addr
;
1535 struct pf_ruleset
*ruleset
;
1536 struct pf_rule
*rule
;
1539 pr
->anchor
[sizeof (pr
->anchor
) - 1] = 0;
1540 ruleset
= pf_find_ruleset(pr
->anchor
);
1541 if (ruleset
== NULL
) {
1545 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
1546 if (rs_num
>= PF_RULESET_MAX
) {
1550 if (pr
->ticket
!= ruleset
->rules
[rs_num
].active
.ticket
) {
1554 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
1555 while ((rule
!= NULL
) && (rule
->nr
!= pr
->nr
))
1556 rule
= TAILQ_NEXT(rule
, entries
);
1561 bcopy(rule
, &pr
->rule
, sizeof (struct pf_rule
));
1562 if (pf_anchor_copyout(ruleset
, rule
, pr
)) {
1566 pfi_dynaddr_copyout(&pr
->rule
.src
.addr
);
1567 pfi_dynaddr_copyout(&pr
->rule
.dst
.addr
);
1568 pf_tbladdr_copyout(&pr
->rule
.src
.addr
);
1569 pf_tbladdr_copyout(&pr
->rule
.dst
.addr
);
1570 pf_rtlabel_copyout(&pr
->rule
.src
.addr
);
1571 pf_rtlabel_copyout(&pr
->rule
.dst
.addr
);
1572 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
1573 if (rule
->skip
[i
].ptr
== NULL
)
1574 pr
->rule
.skip
[i
].nr
= -1;
1576 pr
->rule
.skip
[i
].nr
=
1577 rule
->skip
[i
].ptr
->nr
;
1579 if (pr
->action
== PF_GET_CLR_CNTR
) {
1580 rule
->evaluations
= 0;
1581 rule
->packets
[0] = rule
->packets
[1] = 0;
1582 rule
->bytes
[0] = rule
->bytes
[1] = 0;
1587 case DIOCCHANGERULE
: {
1588 struct pfioc_rule
*pcr
= (struct pfioc_rule
*)addr
;
1589 struct pf_ruleset
*ruleset
;
1590 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
1594 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
1595 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
1596 pcr
->pool_ticket
!= ticket_pabuf
) {
1601 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
1602 pcr
->action
> PF_CHANGE_GET_TICKET
) {
1606 ruleset
= pf_find_ruleset(pcr
->anchor
);
1607 if (ruleset
== NULL
) {
1611 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
1612 if (rs_num
>= PF_RULESET_MAX
) {
1617 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
1618 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
1622 ruleset
->rules
[rs_num
].active
.ticket
) {
1626 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1632 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
1633 newrule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
1634 if (newrule
== NULL
) {
1638 bcopy(&pcr
->rule
, newrule
, sizeof (struct pf_rule
));
1639 newrule
->cuid
= kauth_cred_getuid(p
->p_ucred
);
1640 newrule
->cpid
= p
->p_pid
;
1641 TAILQ_INIT(&newrule
->rpool
.list
);
1642 /* initialize refcounting */
1643 newrule
->states
= 0;
1644 newrule
->entries
.tqe_prev
= NULL
;
1646 if (newrule
->af
== AF_INET
) {
1647 pool_put(&pf_rule_pl
, newrule
);
1648 error
= EAFNOSUPPORT
;
1653 if (newrule
->af
== AF_INET6
) {
1654 pool_put(&pf_rule_pl
, newrule
);
1655 error
= EAFNOSUPPORT
;
1659 if (newrule
->ifname
[0]) {
1660 newrule
->kif
= pfi_kif_get(newrule
->ifname
);
1661 if (newrule
->kif
== NULL
) {
1662 pool_put(&pf_rule_pl
, newrule
);
1666 pfi_kif_ref(newrule
->kif
, PFI_KIF_REF_RULE
);
1668 newrule
->kif
= NULL
;
1672 if (newrule
->qname
[0] != 0) {
1674 pf_qname2qid(newrule
->qname
)) == 0)
1676 else if (newrule
->pqname
[0] != 0) {
1677 if ((newrule
->pqid
=
1678 pf_qname2qid(newrule
->pqname
)) == 0)
1681 newrule
->pqid
= newrule
->qid
;
1684 if (newrule
->tagname
[0])
1686 pf_tagname2tag(newrule
->tagname
)) == 0)
1688 if (newrule
->match_tagname
[0])
1689 if ((newrule
->match_tag
= pf_tagname2tag(
1690 newrule
->match_tagname
)) == 0)
1692 if (newrule
->rt
&& !newrule
->direction
)
1697 if (newrule
->logif
>= PFLOGIFS_MAX
)
1700 if (pf_rtlabel_add(&newrule
->src
.addr
) ||
1701 pf_rtlabel_add(&newrule
->dst
.addr
))
1703 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
1705 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
1707 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
1709 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
1711 if (pf_anchor_setup(newrule
, ruleset
, pcr
->anchor_call
))
1713 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
1714 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
1717 if (newrule
->overload_tblname
[0]) {
1718 if ((newrule
->overload_tbl
= pfr_attach_table(
1719 ruleset
, newrule
->overload_tblname
)) ==
1723 newrule
->overload_tbl
->pfrkt_flags
|=
1727 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
1728 if (((((newrule
->action
== PF_NAT
) ||
1729 (newrule
->action
== PF_RDR
) ||
1730 (newrule
->action
== PF_BINAT
) ||
1731 (newrule
->rt
> PF_FASTROUTE
)) &&
1732 !newrule
->anchor
)) &&
1733 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
1737 pf_rm_rule(NULL
, newrule
);
1740 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
1741 newrule
->evaluations
= 0;
1742 newrule
->packets
[0] = newrule
->packets
[1] = 0;
1743 newrule
->bytes
[0] = newrule
->bytes
[1] = 0;
1745 pf_empty_pool(&pf_pabuf
);
1747 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
1748 oldrule
= TAILQ_FIRST(
1749 ruleset
->rules
[rs_num
].active
.ptr
);
1750 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
1751 oldrule
= TAILQ_LAST(
1752 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
1754 oldrule
= TAILQ_FIRST(
1755 ruleset
->rules
[rs_num
].active
.ptr
);
1756 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
1757 oldrule
= TAILQ_NEXT(oldrule
, entries
);
1758 if (oldrule
== NULL
) {
1759 if (newrule
!= NULL
)
1760 pf_rm_rule(NULL
, newrule
);
1766 if (pcr
->action
== PF_CHANGE_REMOVE
) {
1767 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
1768 ruleset
->rules
[rs_num
].active
.rcount
--;
1770 if (oldrule
== NULL
)
1772 ruleset
->rules
[rs_num
].active
.ptr
,
1774 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
1775 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
1776 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
1779 ruleset
->rules
[rs_num
].active
.ptr
,
1780 oldrule
, newrule
, entries
);
1781 ruleset
->rules
[rs_num
].active
.rcount
++;
1785 TAILQ_FOREACH(oldrule
,
1786 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
1789 ruleset
->rules
[rs_num
].active
.ticket
++;
1791 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
1792 pf_remove_if_empty_ruleset(ruleset
);
1797 case DIOCCLRSTATES
: {
1798 struct pf_state
*s
, *nexts
;
1799 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1802 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
; s
= nexts
) {
1803 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
1805 if (!psk
->psk_ifname
[0] || strcmp(psk
->psk_ifname
,
1806 s
->kif
->pfik_name
) == 0) {
1808 /* don't send out individual delete messages */
1809 s
->sync_flags
= PFSTATE_NOSYNC
;
1815 psk
->psk_af
= killed
;
1817 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
1822 case DIOCKILLSTATES
: {
1823 struct pf_state
*s
, *nexts
;
1824 struct pf_state_key
*sk
;
1825 struct pf_state_host
*src
, *dst
;
1826 struct pfioc_state_kill
*psk
= (struct pfioc_state_kill
*)addr
;
1829 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
;
1831 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
1834 if (sk
->direction
== PF_OUT
) {
1841 if ((!psk
->psk_af
|| sk
->af
== psk
->psk_af
) &&
1842 (!psk
->psk_proto
|| psk
->psk_proto
== sk
->proto
) &&
1843 PF_MATCHA(psk
->psk_src
.neg
,
1844 &psk
->psk_src
.addr
.v
.a
.addr
,
1845 &psk
->psk_src
.addr
.v
.a
.mask
,
1846 &src
->addr
, sk
->af
) &&
1847 PF_MATCHA(psk
->psk_dst
.neg
,
1848 &psk
->psk_dst
.addr
.v
.a
.addr
,
1849 &psk
->psk_dst
.addr
.v
.a
.mask
,
1850 &dst
->addr
, sk
->af
) &&
1851 #ifndef NO_APPLE_EXTENSIONS
1852 (pf_match_xport(psk
->psk_proto
,
1853 psk
->psk_proto_variant
, &psk
->psk_src
.xport
,
1855 (pf_match_xport(psk
->psk_proto
,
1856 psk
->psk_proto_variant
, &psk
->psk_dst
.xport
,
1859 (psk
->psk_src
.port_op
== 0 ||
1860 pf_match_port(psk
->psk_src
.port_op
,
1861 psk
->psk_src
.port
[0], psk
->psk_src
.port
[1],
1863 (psk
->psk_dst
.port_op
== 0 ||
1864 pf_match_port(psk
->psk_dst
.port_op
,
1865 psk
->psk_dst
.port
[0], psk
->psk_dst
.port
[1],
1868 (!psk
->psk_ifname
[0] || strcmp(psk
->psk_ifname
,
1869 s
->kif
->pfik_name
) == 0)) {
1871 /* send immediate delete of state */
1872 pfsync_delete_state(s
);
1873 s
->sync_flags
|= PFSTATE_NOSYNC
;
1879 psk
->psk_af
= killed
;
1883 case DIOCADDSTATE
: {
1884 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1885 struct pfsync_state
*sp
= &ps
->state
;
1887 struct pf_state_key
*sk
;
1888 struct pfi_kif
*kif
;
1890 if (sp
->timeout
>= PFTM_MAX
&&
1891 sp
->timeout
!= PFTM_UNTIL_PACKET
) {
1895 s
= pool_get(&pf_state_pl
, PR_WAITOK
);
1900 bzero(s
, sizeof (struct pf_state
));
1901 if ((sk
= pf_alloc_state_key(s
)) == NULL
) {
1902 pool_put(&pf_state_pl
, s
);
1906 pf_state_import(sp
, sk
, s
);
1907 kif
= pfi_kif_get(sp
->ifname
);
1909 pool_put(&pf_state_pl
, s
);
1910 pool_put(&pf_state_key_pl
, sk
);
1914 #ifndef NO_APPLE_EXTENSIONS
1915 TAILQ_INIT(&s
->unlink_hooks
);
1916 s
->state_key
->app_state
= 0;
1918 if (pf_insert_state(kif
, s
)) {
1919 pfi_kif_unref(kif
, PFI_KIF_REF_NONE
);
1920 pool_put(&pf_state_pl
, s
);
1924 pf_default_rule
.states
++;
1925 VERIFY(pf_default_rule
.states
!= 0);
1929 case DIOCGETSTATE
: {
1930 struct pfioc_state
*ps
= (struct pfioc_state
*)addr
;
1932 struct pf_state_cmp id_key
;
1934 bcopy(ps
->state
.id
, &id_key
.id
, sizeof (id_key
.id
));
1935 id_key
.creatorid
= ps
->state
.creatorid
;
1937 s
= pf_find_state_byid(&id_key
);
1943 pf_state_export(&ps
->state
, s
->state_key
, s
);
1947 case DIOCGETSTATES
: {
1948 struct pfioc_states
*ps
= (struct pfioc_states
*)addr
;
1949 struct pf_state
*state
;
1950 struct pfsync_state
*y
, *pstore
;
1953 if (ps
->ps_len
== 0) {
1954 nr
= pf_status
.states
;
1955 ps
->ps_len
= sizeof (struct pfsync_state
) * nr
;
1959 pstore
= _MALLOC(sizeof (*pstore
), M_TEMP
, M_WAITOK
);
1963 state
= TAILQ_FIRST(&state_list
);
1965 if (state
->timeout
!= PFTM_UNLINKED
) {
1966 if ((nr
+1) * sizeof (*y
) > (unsigned)ps
->ps_len
)
1969 pf_state_export(pstore
,
1970 state
->state_key
, state
);
1971 error
= copyout(pstore
, CAST_USER_ADDR_T(y
),
1974 _FREE(pstore
, M_TEMP
);
1980 state
= TAILQ_NEXT(state
, entry_list
);
1983 ps
->ps_len
= sizeof (struct pfsync_state
) * nr
;
1985 _FREE(pstore
, M_TEMP
);
1989 case DIOCGETSTATUS
: {
1990 struct pf_status
*s
= (struct pf_status
*)addr
;
1991 bcopy(&pf_status
, s
, sizeof (struct pf_status
));
1992 pfi_update_status(s
->ifname
, s
);
1996 case DIOCSETSTATUSIF
: {
1997 struct pfioc_if
*pi
= (struct pfioc_if
*)addr
;
1999 if (pi
->ifname
[0] == 0) {
2000 bzero(pf_status
.ifname
, IFNAMSIZ
);
2003 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
2007 case DIOCCLRSTATUS
: {
2008 bzero(pf_status
.counters
, sizeof (pf_status
.counters
));
2009 bzero(pf_status
.fcounters
, sizeof (pf_status
.fcounters
));
2010 bzero(pf_status
.scounters
, sizeof (pf_status
.scounters
));
2011 pf_status
.since
= pf_calendar_time_second();
2012 if (*pf_status
.ifname
)
2013 pfi_update_status(pf_status
.ifname
, NULL
);
2018 struct pfioc_natlook
*pnl
= (struct pfioc_natlook
*)addr
;
2019 struct pf_state_key
*sk
;
2020 struct pf_state
*state
;
2021 struct pf_state_key_cmp key
;
2022 int m
= 0, direction
= pnl
->direction
;
2025 key
.proto
= pnl
->proto
;
2027 #ifndef NO_APPLE_EXTENSIONS
2028 key
.proto_variant
= pnl
->proto_variant
;
2032 PF_AZERO(&pnl
->saddr
, pnl
->af
) ||
2033 PF_AZERO(&pnl
->daddr
, pnl
->af
) ||
2034 ((pnl
->proto
== IPPROTO_TCP
||
2035 pnl
->proto
== IPPROTO_UDP
) &&
2036 #ifndef NO_APPLE_EXTENSIONS
2037 (!pnl
->dxport
.port
|| !pnl
->sxport
.port
)))
2039 (!pnl
->dport
|| !pnl
->sport
)))
2044 * userland gives us source and dest of connection,
2045 * reverse the lookup so we ask for what happens with
2046 * the return traffic, enabling us to find it in the
2049 if (direction
== PF_IN
) {
2050 PF_ACPY(&key
.ext
.addr
, &pnl
->daddr
, pnl
->af
);
2051 #ifndef NO_APPLE_EXTENSIONS
2052 memcpy(&key
.ext
.xport
, &pnl
->dxport
,
2053 sizeof (key
.ext
.xport
));
2055 key
.ext
.port
= pnl
->dport
;
2057 PF_ACPY(&key
.gwy
.addr
, &pnl
->saddr
, pnl
->af
);
2058 #ifndef NO_APPLE_EXTENSIONS
2059 memcpy(&key
.gwy
.xport
, &pnl
->sxport
,
2060 sizeof (key
.gwy
.xport
));
2062 key
.gwy
.port
= pnl
->sport
;
2064 state
= pf_find_state_all(&key
, PF_IN
, &m
);
2066 PF_ACPY(&key
.lan
.addr
, &pnl
->daddr
, pnl
->af
);
2067 #ifndef NO_APPLE_EXTENSIONS
2068 memcpy(&key
.lan
.xport
, &pnl
->dxport
,
2069 sizeof (key
.lan
.xport
));
2071 key
.lan
.port
= pnl
->dport
;
2073 PF_ACPY(&key
.ext
.addr
, &pnl
->saddr
, pnl
->af
);
2074 #ifndef NO_APPLE_EXTENSIONS
2075 memcpy(&key
.ext
.xport
, &pnl
->sxport
,
2076 sizeof (key
.ext
.xport
));
2078 key
.ext
.port
= pnl
->sport
;
2080 state
= pf_find_state_all(&key
, PF_OUT
, &m
);
2083 error
= E2BIG
; /* more than one state */
2084 else if (state
!= NULL
) {
2085 sk
= state
->state_key
;
2086 if (direction
== PF_IN
) {
2087 PF_ACPY(&pnl
->rsaddr
, &sk
->lan
.addr
,
2089 #ifndef NO_APPLE_EXTENSIONS
2090 memcpy(&pnl
->rsxport
, &sk
->lan
.xport
,
2091 sizeof (pnl
->rsxport
));
2093 pnl
->rsport
= sk
->lan
.port
;
2095 PF_ACPY(&pnl
->rdaddr
, &pnl
->daddr
,
2097 #ifndef NO_APPLE_EXTENSIONS
2098 memcpy(&pnl
->rdxport
, &pnl
->dxport
,
2099 sizeof (pnl
->rdxport
));
2101 pnl
->rdport
= pnl
->dport
;
2104 PF_ACPY(&pnl
->rdaddr
, &sk
->gwy
.addr
,
2106 #ifndef NO_APPLE_EXTENSIONS
2107 memcpy(&pnl
->rdxport
, &sk
->gwy
.xport
,
2108 sizeof (pnl
->rdxport
));
2110 pnl
->rdport
= sk
->gwy
.port
;
2112 PF_ACPY(&pnl
->rsaddr
, &pnl
->saddr
,
2114 #ifndef NO_APPLE_EXTENSIONS
2115 memcpy(&pnl
->rsxport
, &pnl
->sxport
,
2116 sizeof (pnl
->rsxport
));
2118 pnl
->rsport
= pnl
->sport
;
2127 case DIOCSETTIMEOUT
: {
2128 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
2131 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
||
2136 old
= pf_default_rule
.timeout
[pt
->timeout
];
2137 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
== 0)
2139 pf_default_rule
.timeout
[pt
->timeout
] = pt
->seconds
;
2140 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
< old
)
2141 wakeup(pf_purge_thread_fn
);
2146 case DIOCGETTIMEOUT
: {
2147 struct pfioc_tm
*pt
= (struct pfioc_tm
*)addr
;
2149 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
) {
2153 pt
->seconds
= pf_default_rule
.timeout
[pt
->timeout
];
2157 case DIOCGETLIMIT
: {
2158 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
2160 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
) {
2164 pl
->limit
= pf_pool_limits
[pl
->index
].limit
;
2168 case DIOCSETLIMIT
: {
2169 struct pfioc_limit
*pl
= (struct pfioc_limit
*)addr
;
2172 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
||
2173 pf_pool_limits
[pl
->index
].pp
== NULL
) {
2177 pool_sethardlimit(pf_pool_limits
[pl
->index
].pp
,
2178 pl
->limit
, NULL
, 0);
2179 old_limit
= pf_pool_limits
[pl
->index
].limit
;
2180 pf_pool_limits
[pl
->index
].limit
= pl
->limit
;
2181 pl
->limit
= old_limit
;
2185 case DIOCSETDEBUG
: {
2186 u_int32_t
*level
= (u_int32_t
*)addr
;
2188 pf_status
.debug
= *level
;
2192 case DIOCCLRRULECTRS
: {
2193 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
2194 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
2195 struct pf_rule
*rule
;
2198 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
) {
2199 rule
->evaluations
= 0;
2200 rule
->packets
[0] = rule
->packets
[1] = 0;
2201 rule
->bytes
[0] = rule
->bytes
[1] = 0;
2207 case DIOCSTARTALTQ
: {
2208 struct pf_altq
*altq
;
2210 /* enable all altq interfaces on active list */
2211 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
2212 if (altq
->qname
[0] == 0) {
2213 error
= pf_enable_altq(altq
);
2219 pf_altq_running
= 1;
2220 DPFPRINTF(PF_DEBUG_MISC
, ("altq: started\n"));
2224 case DIOCSTOPALTQ
: {
2225 struct pf_altq
*altq
;
2227 /* disable all altq interfaces on active list */
2228 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
2229 if (altq
->qname
[0] == 0) {
2230 error
= pf_disable_altq(altq
);
2236 pf_altq_running
= 0;
2237 DPFPRINTF(PF_DEBUG_MISC
, ("altq: stopped\n"));
2242 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
2243 struct pf_altq
*altq
, *a
;
2245 if (pa
->ticket
!= ticket_altqs_inactive
) {
2249 altq
= pool_get(&pf_altq_pl
, PR_WAITOK
);
2254 bcopy(&pa
->altq
, altq
, sizeof (struct pf_altq
));
2257 * if this is for a queue, find the discipline and
2258 * copy the necessary fields
2260 if (altq
->qname
[0] != 0) {
2261 if ((altq
->qid
= pf_qname2qid(altq
->qname
)) == 0) {
2263 pool_put(&pf_altq_pl
, altq
);
2266 altq
->altq_disc
= NULL
;
2267 TAILQ_FOREACH(a
, pf_altqs_inactive
, entries
) {
2268 if (strncmp(a
->ifname
, altq
->ifname
,
2269 IFNAMSIZ
) == 0 && a
->qname
[0] == 0) {
2270 altq
->altq_disc
= a
->altq_disc
;
2276 error
= altq_add(altq
);
2278 pool_put(&pf_altq_pl
, altq
);
2282 TAILQ_INSERT_TAIL(pf_altqs_inactive
, altq
, entries
);
2283 bcopy(altq
, &pa
->altq
, sizeof (struct pf_altq
));
2287 case DIOCGETALTQS
: {
2288 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
2289 struct pf_altq
*altq
;
2292 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
)
2294 pa
->ticket
= ticket_altqs_active
;
2299 struct pfioc_altq
*pa
= (struct pfioc_altq
*)addr
;
2300 struct pf_altq
*altq
;
2303 if (pa
->ticket
!= ticket_altqs_active
) {
2308 altq
= TAILQ_FIRST(pf_altqs_active
);
2309 while ((altq
!= NULL
) && (nr
< pa
->nr
)) {
2310 altq
= TAILQ_NEXT(altq
, entries
);
2317 bcopy(altq
, &pa
->altq
, sizeof (struct pf_altq
));
2321 case DIOCCHANGEALTQ
:
2322 /* CHANGEALTQ not supported yet! */
2326 case DIOCGETQSTATS
: {
2327 struct pfioc_qstats
*pq
= (struct pfioc_qstats
*)addr
;
2328 struct pf_altq
*altq
;
2332 if (pq
->ticket
!= ticket_altqs_active
) {
2336 nbytes
= pq
->nbytes
;
2338 altq
= TAILQ_FIRST(pf_altqs_active
);
2339 while ((altq
!= NULL
) && (nr
< pq
->nr
)) {
2340 altq
= TAILQ_NEXT(altq
, entries
);
2347 error
= altq_getqstats(altq
, pq
->buf
, &nbytes
);
2349 pq
->scheduler
= altq
->scheduler
;
2350 pq
->nbytes
= nbytes
;
2356 case DIOCBEGINADDRS
: {
2357 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2359 pf_empty_pool(&pf_pabuf
);
2360 pp
->ticket
= ++ticket_pabuf
;
2365 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2367 if (pp
->ticket
!= ticket_pabuf
) {
2372 if (pp
->af
== AF_INET
) {
2373 error
= EAFNOSUPPORT
;
2378 if (pp
->af
== AF_INET6
) {
2379 error
= EAFNOSUPPORT
;
2383 if (pp
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
2384 pp
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
2385 pp
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
2389 pa
= pool_get(&pf_pooladdr_pl
, PR_WAITOK
);
2394 bcopy(&pp
->addr
, pa
, sizeof (struct pf_pooladdr
));
2395 if (pa
->ifname
[0]) {
2396 pa
->kif
= pfi_kif_get(pa
->ifname
);
2397 if (pa
->kif
== NULL
) {
2398 pool_put(&pf_pooladdr_pl
, pa
);
2402 pfi_kif_ref(pa
->kif
, PFI_KIF_REF_RULE
);
2404 if (pfi_dynaddr_setup(&pa
->addr
, pp
->af
)) {
2405 pfi_dynaddr_remove(&pa
->addr
);
2406 pfi_kif_unref(pa
->kif
, PFI_KIF_REF_RULE
);
2407 pool_put(&pf_pooladdr_pl
, pa
);
2411 TAILQ_INSERT_TAIL(&pf_pabuf
, pa
, entries
);
2415 case DIOCGETADDRS
: {
2416 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2419 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
2420 pp
->r_num
, 0, 1, 0);
2425 TAILQ_FOREACH(pa
, &pool
->list
, entries
)
2431 struct pfioc_pooladdr
*pp
= (struct pfioc_pooladdr
*)addr
;
2434 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
2435 pp
->r_num
, 0, 1, 1);
2440 pa
= TAILQ_FIRST(&pool
->list
);
2441 while ((pa
!= NULL
) && (nr
< pp
->nr
)) {
2442 pa
= TAILQ_NEXT(pa
, entries
);
2449 bcopy(pa
, &pp
->addr
, sizeof (struct pf_pooladdr
));
2450 pfi_dynaddr_copyout(&pp
->addr
.addr
);
2451 pf_tbladdr_copyout(&pp
->addr
.addr
);
2452 pf_rtlabel_copyout(&pp
->addr
.addr
);
	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof (struct pf_pooladdr));
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

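	/*
	 * The two ruleset ioctls enumerate anchors one level at a time:
	 * DIOCGETRULESETS counts the children of the anchor named by
	 * pr->path, and DIOCGETRULESET returns the name of the pr->nr'th
	 * child.  The main ruleset is special-cased since it has no
	 * pf_anchor of its own.
	 */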
	case DIOCGETRULESETS: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof (pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof (pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}

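	/*
	 * The DIOCR* ioctls below are thin wrappers around the pfr_* table
	 * layer.  Each first checks pfrio_esize against the element type it
	 * expects, so a mismatched userland structure fails with ENODEV
	 * instead of being misread, and each ORs PFR_FLAG_USERIOCTL into
	 * the flags to mark the request as coming from userland.
	 */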
	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}

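	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement ticketed
	 * transactions over whole rulesets.  Userland passes an array of
	 * pfioc_trans_e elements, one per ruleset to replace; DIOCXBEGIN
	 * returns a ticket for each, the new rules are loaded against the
	 * inactive set, and the caller then commits or rolls back with the
	 * same tickets.  A minimal sketch of the userland side (illustrative
	 * only; for the real thing see pfctl):
	 *
	 *	struct pfioc_trans io;
	 *	struct pfioc_trans_e e[1];
	 *
	 *	bzero(&io, sizeof (io));
	 *	bzero(e, sizeof (e));
	 *	e[0].rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof (e[0]);
	 *	io.array = e;
	 *	if (ioctl(dev, DIOCXBEGIN, &io) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	... load rules against e[0].ticket ...
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 */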
	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
			    sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, CAST_USER_ADDR_T(io->array+i),
			    sizeof (io->array[i]))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < io->size; i++) {
			if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
			    sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

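	/*
	 * DIOCXCOMMIT is deliberately two-phase: the first pass below only
	 * re-validates every ticket while nothing has been swapped in yet,
	 * so a stale ticket can still abort the whole transaction with
	 * EBUSY; the second pass then performs the actual commits, which
	 * are not expected to fail.
	 */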
	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++) {
			if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
			    sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++) {
			if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
			    sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if ((error = pf_commit_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

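	/*
	 * DIOCGETSRCNODES uses the usual two-call size probe: when called
	 * with psn_len == 0 it only reports the buffer size required;
	 * otherwise it copies out as many pf_src_node entries as fit and
	 * rewrites psn_len to the size actually used.  Creation and expiry
	 * times are converted from absolute to relative seconds on the way
	 * out to userland.
	 */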
	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *sn, *pstore;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof (struct pf_src_node) * nr;
			break;
		}

		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);

		sn = psn->psn_src_nodes;
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			if ((nr + 1) * sizeof (*sn) > (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, CAST_USER_ADDR_T(sn),
			    sizeof (*sn));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			sn++;
			nr++;
		}
		psn->psn_len = sizeof (struct pf_src_node) * nr;

		_FREE(pstore, M_TEMP);
		break;
	}

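	/*
	 * The two ioctls below do not free source nodes directly: they
	 * detach the nodes from any states still pointing at them, mark
	 * them expired, and leave the actual reclamation to
	 * pf_purge_expired_src_nodes().
	 */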
	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_af = killed;
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t *hid = (u_int32_t *)addr;

		if (*hid == 0)
			pf_status.hostid = random();
		else
			pf_status.hostid = *hid;
		break;
	}

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		if (io->pfiio_esize != sizeof (struct pfi_kif)) {
			error = ENODEV;
			break;
		}
		error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
		    &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	return (error);
}

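/*
 * pf_af_hook() is the entry point from the interface I/O paths into PF.
 * It detaches the head packet from any mbuf chain, runs it through the
 * per-address-family hook, then splices the rest of the chain back.  The
 * if_pf_curthread check makes the hook reentrancy-safe: when PF itself
 * re-injects a packet on the same interface from the same thread, the
 * locks are not taken a second time.
 */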
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input)
{
	int error = 0, reentry;
	struct thread *curthread = current_thread();
	struct mbuf *nextpkt;

	reentry = (ifp->if_pf_curthread == curthread);
	if (!reentry) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_hooks_attached)
			goto done;

		lck_mtx_lock(pf_lock);
		ifp->if_pf_curthread = curthread;
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(ifp, mp, input);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6: {
		error = pf_inet6_hook(ifp, mp, input);
		break;
	}
#endif /* INET6 */
	default:
		break;
	}

	/* restore the packet chain, minus the head we just processed */
	if (nextpkt != NULL) {
		if (*mp != NULL) {
			struct mbuf *m = *mp;
			while (m->m_nextpkt != NULL)
				m = m->m_nextpkt;
			m->m_nextpkt = nextpkt;
		} else {
			*mp = nextpkt;
		}
	}
	if (mppn != NULL && *mppn != NULL)
		*mppn = *mp;

	if (!reentry) {
		ifp->if_pf_curthread = NULL;
		lck_mtx_unlock(pf_lock);
	}
done:
	if (!reentry)
		lck_rw_done(pf_perim_lock);

	return (error);
}

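/*
 * Note that pf_test() expects ip_len and ip_off in network byte order,
 * while at this layer the stack carries them in host order; hence the
 * HTONS/NTOHS swaps around the call on little-endian machines.
 */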
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		ip = mtod(*mp, struct ip *);
		NTOHS(ip->ip_len);
		NTOHS(ip->ip_off);
	}
#endif
	return (error);
}

static int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	int error = 0;

	/*
	 * TODO: once we support IPv6 hardware checksum offload
	 *
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		}
	}
	return (error);
}

void
pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd)
{
	lck_rw_lock_shared(pf_perim_lock);
	if (!pf_hooks_attached)
		goto done;

	lck_mtx_lock(pf_lock);

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#if INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif /* INET6 */
		if (ifp->if_pf_kif != NULL)
			pfi_kifaddr_update(ifp->if_pf_kif);
		break;
	default:
		panic("%s: unexpected ioctl %lu", __func__, cmd);
		/* NOTREACHED */
	}
	lck_mtx_unlock(pf_lock);
done:
	lck_rw_done(pf_perim_lock);
}

/*
 * Caller acquires dlil lock as writer (exclusive)
 */
void
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
	lck_rw_lock_shared(pf_perim_lock);
	if (!pf_hooks_attached)
		goto done;

	lck_mtx_lock(pf_lock);
	if (attach)
		pfi_attach_ifnet(ifp);
	else
		pfi_detach_ifnet(ifp);
	lck_mtx_unlock(pf_lock);
done:
	lck_rw_done(pf_perim_lock);
}

void
pf_attach_hooks(void)
{
	int i;

	if (pf_hooks_attached)
		return;

	ifnet_head_lock_shared();
	for (i = 0; i <= if_index; i++) {
		struct ifnet *ifp = ifindex2ifnet[i];
		if (ifp != NULL)
			pfi_attach_ifnet(ifp);
	}
	ifnet_head_done();
	pf_hooks_attached = 1;
}

void
pf_detach_hooks(void)
{
	int i;

	if (!pf_hooks_attached)
		return;

	ifnet_head_lock_shared();
	for (i = 0; i <= if_index; i++) {
		struct ifnet *ifp = ifindex2ifnet[i];
		if (ifp != NULL && ifp->if_pf_kif != NULL) {
			pfi_detach_ifnet(ifp);
		}
	}
	ifnet_head_done();
	pf_hooks_attached = 0;
}