/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/mcache.h>

#include <mach/vm_param.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#include <libkern/crypto/md5.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/ip6.h>
#include <netinet/in_pcb.h>

#include <altq/altq.h>
static void pfdetach(void);
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pf_begin_altq(u_int32_t *);
static int pf_rollback_altq(u_int32_t);
static int pf_commit_altq(u_int32_t);
static int pf_enable_altq(struct pf_altq *);
static int pf_disable_altq(struct pf_altq *);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
#ifndef NO_APPLE_EXTENSIONS
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
#else
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
#endif
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);

#define PF_CDEV_MAJOR   (-1)

static struct cdevsw pf_cdevsw = {
    /* open */      pfopen,
    /* close */     pfclose,
    /* read */      eno_rdwrt,
    /* write */     eno_rdwrt,
    /* ioctl */     pfioctl,
    /* stop */      eno_stop,
    /* reset */     eno_reset,
    /* ttys */      NULL,
    /* select */    eno_select,
    /* mmap */      eno_mmap,
    /* strategy */  eno_strat,
    /* getc */      eno_getc,
    /* putc */      eno_putc,
    /* type */      0
};

static void pf_attach_hooks(void);
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
static int pf_is_enabled;

struct pf_rule pf_default_rule;
static int pf_altq_running;

#define TAGID_MAX   50000
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
static TAILQ_HEAD(pf_tags, pf_tagname) pf_qids =
    TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t tagname2tag(struct pf_tags *, char *);
static void tag2tagname(struct pf_tags *, u_int16_t, char *);
static void tag_unref(struct pf_tags *, u_int16_t);
static int pf_rtlabel_add(struct pf_addr_wrap *);
static void pf_rtlabel_remove(struct pf_addr_wrap *);
static void pf_rtlabel_copyout(struct pf_addr_wrap *);
static int pf_inet_hook(struct ifnet *, struct mbuf **, int);
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int);
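/*
 * DPFPRINTF() logs only when the global debug level is at or above `n'.
 * PF_USER_ADDR() selects the 32-bit or 64-bit user-visible layout of an
 * ioctl argument structure depending on whether the calling process is
 * 64-bit, so one kernel serves both user ABIs.
 */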
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

#define PF_USER_ADDR(a, s, f) \
    (proc_is64bit(current_proc()) ? \
    ((struct s##_64 *)a)->f : ((struct s##_32 *)a)->f)

static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;

extern void pfi_kifaddr_update(void *);
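/*
 * One-time initialization: allocate the perimeter rwlock and the main
 * mutex, carve out the memory pools for rules/states/addresses, install
 * the default (pass) rule and timeout table, start the purge thread,
 * and attach the /dev/pf node.
 */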
void
pfinit(void)
{
    u_int32_t *t = pf_default_rule.timeout;
    int maj;

    pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
    pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
        pf_perim_lock_grp_attr);
    pf_perim_lock_attr = lck_attr_alloc_init();
    pf_perim_lock = lck_rw_alloc_init(pf_perim_lock_grp,
        pf_perim_lock_attr);

    pf_lock_grp_attr = lck_grp_attr_alloc_init();
    pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
    pf_lock_attr = lck_attr_alloc_init();
    pf_lock = lck_mtx_alloc_init(pf_lock_grp, pf_lock_attr);

    pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
        NULL);
    pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
        "pfsrctrpl", NULL);
    pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
        NULL);
    pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
        "pfstatekeypl", NULL);
#ifndef NO_APPLE_EXTENSIONS
    pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
        "pfappstatepl", NULL);
#endif
    pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
        NULL);
    pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
        "pfpooladdrpl", NULL);

    pf_osfp_initialize();

    pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
        pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

    if (max_mem <= 256*1024*1024)
        pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
            PFR_KENTRY_HIWAT_SMALL;

    RB_INIT(&tree_src_tracking);
    RB_INIT(&pf_anchors);
    pf_init_ruleset(&pf_main_ruleset);
    TAILQ_INIT(&pf_pabuf);
    TAILQ_INIT(&state_list);

    TAILQ_INIT(&pf_altqs[0]);
    TAILQ_INIT(&pf_altqs[1]);
    pf_altqs_active = &pf_altqs[0];
    pf_altqs_inactive = &pf_altqs[1];

    /* default rule should never be garbage collected */
    pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
    pf_default_rule.action = PF_PASS;
    pf_default_rule.nr = -1;
    pf_default_rule.rtableid = IFSCOPE_NONE;

    /* initialize default timeouts */
    t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
    t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
    t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
    t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
    t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
    t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
    t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
    t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
    t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
    t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
    t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
#ifndef NO_APPLE_EXTENSIONS
    t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
    t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
    t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
    t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
    t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
    t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
#endif
    t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
    t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
    t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
    t[PFTM_FRAG] = PFTM_FRAG_VAL;
    t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
    t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
    t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
    t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
    t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

    bzero(&pf_status, sizeof (pf_status));
    pf_status.debug = PF_DEBUG_URGENT;

    /* XXX do our best to avoid a conflict */
    pf_status.hostid = random();

    if (kernel_thread_start(pf_purge_thread_fn, NULL,
        &pf_purge_thread) != 0) {
        printf("%s: unable to start purge thread!", __func__);
        return;
    }

    maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
    if (maj == -1) {
        printf("%s: failed to allocate major number!\n", __func__);
        return;
    }
    (void) devfs_make_node(makedev(maj, 0), DEVFS_CHAR,
        UID_ROOT, GID_WHEEL, 0600, "pf", 0);
}
static void
pfdetach(void)
{
    struct pf_anchor *anchor;
    struct pf_state *state;
    struct pf_src_node *node;
    struct pfioc_table pt;
    u_int32_t ticket;
    int i;
    char r = '\0';

    pf_status.running = 0;
    wakeup(pf_purge_thread_fn);

    /* clear the rulesets */
    for (i = 0; i < PF_RULESET_MAX; i++)
        if (pf_begin_rules(&ticket, i, &r) == 0)
            pf_commit_rules(ticket, i, &r);

    if (pf_begin_altq(&ticket) == 0)
        pf_commit_altq(ticket);

    /* clear states */
    RB_FOREACH(state, pf_state_tree_id, &tree_id) {
        state->timeout = PFTM_PURGE;
        state->sync_flags = PFSTATE_NOSYNC;
    }
    pf_purge_expired_states(pf_status.states);

    pfsync_clear_states(pf_status.hostid, NULL);

    /* clear source nodes */
    RB_FOREACH(state, pf_state_tree_id, &tree_id) {
        state->src_node = NULL;
        state->nat_src_node = NULL;
    }
    RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
        node->expire = 1;
        node->states = 0;
    }
    pf_purge_expired_src_nodes();

    /* clear tables */
    memset(&pt, '\0', sizeof (pt));
    pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

    /* destroy anchors */
    while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
        for (i = 0; i < PF_RULESET_MAX; i++)
            if (pf_begin_rules(&ticket, i, anchor->name) == 0)
                pf_commit_rules(ticket, i, anchor->name);
    }

    /* destroy main ruleset */
    pf_remove_if_empty_ruleset(&pf_main_ruleset);

    /* destroy the pools */
    pool_destroy(&pf_pooladdr_pl);
    pool_destroy(&pf_altq_pl);
    pool_destroy(&pf_state_pl);
    pool_destroy(&pf_rule_pl);
    pool_destroy(&pf_src_tree_pl);

    /* destroy subsystems */
    pf_normalize_destroy();
}
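/*
 * Character device entry points for /dev/pf.  All configuration goes
 * through ioctl(2) on this node; open and close keep no state of their
 * own beyond rejecting minors other than 0.
 */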
static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
    if (minor(dev) >= 1)
        return (ENXIO);
    return (0);
}
static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
    if (minor(dev) >= 1)
        return (ENXIO);
    return (0);
}
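/*
 * Resolve an (anchor, action, rule number) reference to the rule's
 * address pool.  When check_ticket is set, the caller's ticket must
 * match the active or inactive ruleset ticket, so stale userland
 * references fail with NULL instead of touching a swapped-out ruleset.
 */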
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule;
    int rs_num;

    ruleset = pf_find_ruleset(anchor);
    if (ruleset == NULL)
        return (NULL);
    rs_num = pf_get_ruleset_number(rule_action);
    if (rs_num >= PF_RULESET_MAX)
        return (NULL);
    if (active) {
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].active.ticket)
            return (NULL);
        if (r_last)
            rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                pf_rulequeue);
        else
            rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
    } else {
        if (check_ticket && ticket !=
            ruleset->rules[rs_num].inactive.ticket)
            return (NULL);
        if (r_last)
            rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
                pf_rulequeue);
        else
            rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
    }
    if (!r_last) {
        while ((rule != NULL) && (rule->nr != rule_number))
            rule = TAILQ_NEXT(rule, entries);
    }
    if (rule == NULL)
        return (NULL);

    return (&rule->rpool);
}
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
    struct pf_pooladdr *mv_pool_pa;

    while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
        TAILQ_REMOVE(poola, mv_pool_pa, entries);
        TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
    }
}
static void
pf_empty_pool(struct pf_palist *poola)
{
    struct pf_pooladdr *empty_pool_pa;

    while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
        pfi_dynaddr_remove(&empty_pool_pa->addr);
        pf_tbladdr_remove(&empty_pool_pa->addr);
        pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
        TAILQ_REMOVE(poola, empty_pool_pa, entries);
        pool_put(&pf_pooladdr_pl, empty_pool_pa);
    }
}
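/*
 * Unlink a rule from its queue and release everything it references
 * (tags, queue ids, tables, dynamic addresses, kif, anchor, pool).
 * A rule that still owns states or source nodes is only unlinked here;
 * the final free happens once the last reference goes away.
 */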
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
    if (rulequeue != NULL) {
        if (rule->states <= 0) {
            /*
             * XXX - we need to remove the table *before* detaching
             * the rule to make sure the table code does not delete
             * the anchor under our feet.
             */
            pf_tbladdr_remove(&rule->src.addr);
            pf_tbladdr_remove(&rule->dst.addr);
            if (rule->overload_tbl)
                pfr_detach_table(rule->overload_tbl);
        }
        TAILQ_REMOVE(rulequeue, rule, entries);
        rule->entries.tqe_prev = NULL;
        rule->nr = -1;
    }

    if (rule->states > 0 || rule->src_nodes > 0 ||
        rule->entries.tqe_prev != NULL)
        return;
    pf_tag_unref(rule->tag);
    pf_tag_unref(rule->match_tag);
    if (rule->pqid != rule->qid)
        pf_qid_unref(rule->pqid);
    pf_qid_unref(rule->qid);
    pf_rtlabel_remove(&rule->src.addr);
    pf_rtlabel_remove(&rule->dst.addr);
    pfi_dynaddr_remove(&rule->src.addr);
    pfi_dynaddr_remove(&rule->dst.addr);
    if (rulequeue == NULL) {
        pf_tbladdr_remove(&rule->src.addr);
        pf_tbladdr_remove(&rule->dst.addr);
        if (rule->overload_tbl)
            pfr_detach_table(rule->overload_tbl);
    }
    pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
    pf_anchor_remove(rule);
    pf_empty_pool(&rule->rpool.list);
    pool_put(&pf_rule_pl, rule);
}
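/*
 * Rule tags and altq queue names share one allocator: a reference-
 * counted list mapping names to 16-bit ids, bounded by TAGID_MAX.
 */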
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
    struct pf_tagname *tag, *p = NULL;
    u_int16_t new_tagid = 1;

    TAILQ_FOREACH(tag, head, entries)
        if (strcmp(tagname, tag->name) == 0) {
            tag->ref++;
            return (tag->tag);
        }

    /*
     * to avoid fragmentation, we do a linear search from the beginning
     * and take the first free slot we find. if there is none or the list
     * is empty, append a new entry at the end.
     */

    /* new entry */
    if (!TAILQ_EMPTY(head))
        for (p = TAILQ_FIRST(head); p != NULL &&
            p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
            new_tagid = p->tag + 1;

    if (new_tagid > TAGID_MAX)
        return (0);

    /* allocate and fill new struct pf_tagname */
    tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
    if (tag == NULL)
        return (0);
    strlcpy(tag->name, tagname, sizeof (tag->name));
    tag->tag = new_tagid;
    tag->ref++;

    if (p != NULL)  /* insert new entry before p */
        TAILQ_INSERT_BEFORE(p, tag, entries);
    else            /* either list empty or no free slot in between */
        TAILQ_INSERT_TAIL(head, tag, entries);

    return (new_tagid);
}
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
    struct pf_tagname *tag;

    TAILQ_FOREACH(tag, head, entries)
        if (tag->tag == tagid) {
            strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
            return;
        }
}
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
    struct pf_tagname *p, *next;

    if (tag == 0)
        return;

    for (p = TAILQ_FIRST(head); p != NULL; p = next) {
        next = TAILQ_NEXT(p, entries);
        if (tag == p->tag) {
            if (--p->ref == 0) {
                TAILQ_REMOVE(head, p, entries);
                _FREE(p, M_TEMP);
            }
            break;
        }
    }
}
u_int16_t
pf_tagname2tag(char *tagname)
{
    return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
    tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
    struct pf_tagname *t;

    TAILQ_FOREACH(t, &pf_tags, entries)
        if (t->tag == tag)
            break;
    if (t != NULL)
        t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
    tag_unref(&pf_tags, tag);
}
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
    return (0);
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

u_int32_t
pf_qname2qid(char *qname)
{
    return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
    tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
    tag_unref(&pf_qids, (u_int16_t)qid);
}
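/*
 * Altq configuration is transactional, mirroring rulesets:
 * pf_begin_altq() opens a fresh inactive list under a new ticket,
 * pf_rollback_altq() discards it, and pf_commit_altq() swaps it in
 * as the active list.
 */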
static int
pf_begin_altq(u_int32_t *ticket)
{
    struct pf_altq *altq;
    int error = 0;

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    if (error)
        return (error);
    *ticket = ++ticket_altqs_inactive;
    altqs_inactive_open = 1;
    return (0);
}
static int
pf_rollback_altq(u_int32_t ticket)
{
    struct pf_altq *altq;
    int error = 0;

    if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
        return (0);
    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            error = altq_remove(altq);
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    altqs_inactive_open = 0;
    return (error);
}
static int
pf_commit_altq(u_int32_t ticket)
{
    struct pf_altqqueue *old_altqs;
    struct pf_altq *altq;
    int s, err, error = 0;

    if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
        return (EBUSY);

    /* swap altqs, keep the old. */
    s = splnet();
    old_altqs = pf_altqs_active;
    pf_altqs_active = pf_altqs_inactive;
    pf_altqs_inactive = old_altqs;
    ticket_altqs_active = ticket_altqs_inactive;

    /* Attach new disciplines */
    TAILQ_FOREACH(altq, pf_altqs_active, entries) {
        if (altq->qname[0] == 0) {
            /* attach the discipline */
            error = altq_pfattach(altq);
            if (error == 0 && pf_altq_running)
                error = pf_enable_altq(altq);
            if (error != 0) {
                splx(s);
                return (error);
            }
        }
    }

    /* Purge the old altq list */
    while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
        TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
        if (altq->qname[0] == 0) {
            /* detach and destroy the discipline */
            if (pf_altq_running)
                error = pf_disable_altq(altq);
            err = altq_pfdetach(altq);
            if (err != 0 && error == 0)
                error = err;
            err = altq_remove(altq);
            if (err != 0 && error == 0)
                error = err;
        } else
            pf_qid_unref(altq->qid);
        pool_put(&pf_altq_pl, altq);
    }
    splx(s);

    altqs_inactive_open = 0;
    return (error);
}
static int
pf_enable_altq(struct pf_altq *altq)
{
    struct ifnet *ifp;
    struct tb_profile tb;
    int s, error = 0;

    if ((ifp = ifunit(altq->ifname)) == NULL)
        return (EINVAL);

    if (ifp->if_snd.altq_type != ALTQT_NONE)
        error = altq_enable(&ifp->if_snd);

    /* set tokenbucket regulator */
    if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
        tb.rate = altq->ifbandwidth;
        tb.depth = altq->tbrsize;
        s = splnet();
        error = tbr_set(&ifp->if_snd, &tb);
        splx(s);
    }

    return (error);
}
static int
pf_disable_altq(struct pf_altq *altq)
{
    struct ifnet *ifp;
    struct tb_profile tb;
    int s, error;

    if ((ifp = ifunit(altq->ifname)) == NULL)
        return (EINVAL);

    /*
     * when the discipline is no longer referenced, it was overridden
     * by a new one. if so, just return.
     */
    if (altq->altq_disc != ifp->if_snd.altq_disc)
        return (0);

    error = altq_disable(&ifp->if_snd);

    if (error == 0) {
        /* clear tokenbucket regulator */
        tb.rate = 0;
        s = splnet();
        error = tbr_set(&ifp->if_snd, &tb);
        splx(s);
    }

    return (error);
}
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_or_create_ruleset(anchor);
    if (rs == NULL)
        return (EINVAL);
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    }
    *ticket = ++rs->rules[rs_num].inactive.ticket;
    rs->rules[rs_num].inactive.open = 1;
    return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule;

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        rs->rules[rs_num].inactive.ticket != ticket)
        return (0);
    while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
        pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
        rs->rules[rs_num].inactive.rcount--;
    }
    rs->rules[rs_num].inactive.open = 0;
    return (0);
}
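/*
 * The PF_MD5_* macros feed individual rule fields into an MD5 context;
 * multi-byte fields are hashed in network byte order so the resulting
 * ruleset checksum is comparable between hosts (pfsync peers use it to
 * verify that they are running identical rulesets).
 */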
#define PF_MD5_UPD(st, elm) \
    MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
    MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
    (stor) = htonl((st)->elm); \
    MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
    (stor) = htons((st)->elm); \
    MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
#ifndef NO_APPLE_EXTENSIONS
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
#else
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
#endif
{
    PF_MD5_UPD(pfr, addr.type);
    switch (pfr->addr.type) {
    case PF_ADDR_DYNIFTL:
        PF_MD5_UPD(pfr, addr.v.ifname);
        PF_MD5_UPD(pfr, addr.iflags);
        break;
    case PF_ADDR_TABLE:
        PF_MD5_UPD(pfr, addr.v.tblname);
        break;
    case PF_ADDR_ADDRMASK:
        PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
        PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
        break;
    case PF_ADDR_RTLABEL:
        PF_MD5_UPD(pfr, addr.v.rtlabelname);
        break;
    }

#ifndef NO_APPLE_EXTENSIONS
    switch (proto) {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
        PF_MD5_UPD(pfr, xport.range.port[0]);
        PF_MD5_UPD(pfr, xport.range.port[1]);
        PF_MD5_UPD(pfr, xport.range.op);
        break;
    default:
        break;
    }

    PF_MD5_UPD(pfr, neg);
#else
    PF_MD5_UPD(pfr, port[0]);
    PF_MD5_UPD(pfr, port[1]);
    PF_MD5_UPD(pfr, neg);
    PF_MD5_UPD(pfr, port_op);
#endif
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
    u_int16_t x;
    u_int32_t y;

#ifndef NO_APPLE_EXTENSIONS
    pf_hash_rule_addr(ctx, &rule->src, rule->proto);
    pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
#else
    pf_hash_rule_addr(ctx, &rule->src);
    pf_hash_rule_addr(ctx, &rule->dst);
#endif
    PF_MD5_UPD_STR(rule, label);
    PF_MD5_UPD_STR(rule, ifname);
    PF_MD5_UPD_STR(rule, match_tagname);
    PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
    PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
    PF_MD5_UPD_HTONL(rule, prob, y);
    PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
    PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
    PF_MD5_UPD(rule, uid.op);
    PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
    PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
    PF_MD5_UPD(rule, gid.op);
    PF_MD5_UPD_HTONL(rule, rule_flag, y);
    PF_MD5_UPD(rule, action);
    PF_MD5_UPD(rule, direction);
    PF_MD5_UPD(rule, af);
    PF_MD5_UPD(rule, quick);
    PF_MD5_UPD(rule, ifnot);
    PF_MD5_UPD(rule, match_tag_not);
    PF_MD5_UPD(rule, natpass);
    PF_MD5_UPD(rule, keep_state);
    PF_MD5_UPD(rule, proto);
    PF_MD5_UPD(rule, type);
    PF_MD5_UPD(rule, code);
    PF_MD5_UPD(rule, flags);
    PF_MD5_UPD(rule, flagset);
    PF_MD5_UPD(rule, allow_opts);
    PF_MD5_UPD(rule, rt);
    PF_MD5_UPD(rule, tos);
}
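/*
 * Commit an open inactive ruleset: under pf_lock the inactive and
 * active rule queues are swapped in place, the active ticket is
 * advanced, and the now-inactive (old) rules are torn down, so rule
 * evaluation never sees a half-loaded ruleset.
 */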
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
    struct pf_ruleset *rs;
    struct pf_rule *rule, **old_array;
    struct pf_rulequeue *old_rules;
    int error = 0;
    u_int32_t old_rcount;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
        return (EINVAL);
    rs = pf_find_ruleset(anchor);
    if (rs == NULL || !rs->rules[rs_num].inactive.open ||
        ticket != rs->rules[rs_num].inactive.ticket)
        return (EBUSY);

    /* Calculate checksum for the main ruleset */
    if (rs == &pf_main_ruleset) {
        error = pf_setup_pfsync_matching(rs);
        if (error != 0)
            return (error);
    }

    /* Swap rules, keep the old. */
    old_rules = rs->rules[rs_num].active.ptr;
    old_rcount = rs->rules[rs_num].active.rcount;
    old_array = rs->rules[rs_num].active.ptr_array;

    rs->rules[rs_num].active.ptr =
        rs->rules[rs_num].inactive.ptr;
    rs->rules[rs_num].active.ptr_array =
        rs->rules[rs_num].inactive.ptr_array;
    rs->rules[rs_num].active.rcount =
        rs->rules[rs_num].inactive.rcount;
    rs->rules[rs_num].inactive.ptr = old_rules;
    rs->rules[rs_num].inactive.ptr_array = old_array;
    rs->rules[rs_num].inactive.rcount = old_rcount;

    rs->rules[rs_num].active.ticket =
        rs->rules[rs_num].inactive.ticket;
    pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

    /* Purge the old rule list. */
    while ((rule = TAILQ_FIRST(old_rules)) != NULL)
        pf_rm_rule(old_rules, rule);
    if (rs->rules[rs_num].inactive.ptr_array)
        _FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
    rs->rules[rs_num].inactive.ptr_array = NULL;
    rs->rules[rs_num].inactive.rcount = 0;
    rs->rules[rs_num].inactive.open = 0;
    pf_remove_if_empty_ruleset(rs);
    return (0);
}
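/*
 * Flatten a kernel state entry (state key plus state) into the
 * ABI-stable pfsync_state representation handed to userland and to
 * pfsync peers; counters are converted and times made relative.
 */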
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
    uint64_t secs = pf_time_second();
    bzero(sp, sizeof (struct pfsync_state));

    /* copy from state key */
#ifndef NO_APPLE_EXTENSIONS
    sp->lan.addr = sk->lan.addr;
    sp->lan.xport = sk->lan.xport;
    sp->gwy.addr = sk->gwy.addr;
    sp->gwy.xport = sk->gwy.xport;
    sp->ext.addr = sk->ext.addr;
    sp->ext.xport = sk->ext.xport;
    sp->proto_variant = sk->proto_variant;
#else
    sp->lan.addr = sk->lan.addr;
    sp->lan.port = sk->lan.port;
    sp->gwy.addr = sk->gwy.addr;
    sp->gwy.port = sk->gwy.port;
    sp->ext.addr = sk->ext.addr;
    sp->ext.port = sk->ext.port;
#endif
    sp->proto = sk->proto;
    sp->af = sk->af;
    sp->direction = sk->direction;

    /* copy from state */
    memcpy(&sp->id, &s->id, sizeof (sp->id));
    sp->creatorid = s->creatorid;
    strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
    pf_state_peer_to_pfsync(&s->src, &sp->src);
    pf_state_peer_to_pfsync(&s->dst, &sp->dst);

    sp->rule = s->rule.ptr->nr;
    sp->nat_rule = (s->nat_rule.ptr == NULL) ?
        (unsigned)-1 : s->nat_rule.ptr->nr;
    sp->anchor = (s->anchor.ptr == NULL) ?
        (unsigned)-1 : s->anchor.ptr->nr;

    pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
    pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
    pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
    pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
    sp->creation = secs - s->creation;
    sp->expire = pf_state_expires(s);
    sp->log = s->log;
    sp->allow_opts = s->allow_opts;
    sp->timeout = s->timeout;

    if (s->src_node)
        sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
    if (s->nat_src_node)
        sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

    if (sp->expire > secs)
        sp->expire -= secs;
    else
        sp->expire = 0;
}
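/*
 * Inverse of pf_state_export(): rebuild a kernel state from the pfsync
 * representation.  Rule pointers are reset to the default rule and the
 * traffic counters start from zero; only the lifetime is carried over.
 */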
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
    /* copy to state key */
#ifndef NO_APPLE_EXTENSIONS
    sk->lan.addr = sp->lan.addr;
    sk->lan.xport = sp->lan.xport;
    sk->gwy.addr = sp->gwy.addr;
    sk->gwy.xport = sp->gwy.xport;
    sk->ext.addr = sp->ext.addr;
    sk->ext.xport = sp->ext.xport;
    sk->proto_variant = sp->proto_variant;
#else
    sk->lan.addr = sp->lan.addr;
    sk->lan.port = sp->lan.port;
    sk->gwy.addr = sp->gwy.addr;
    sk->gwy.port = sp->gwy.port;
    sk->ext.addr = sp->ext.addr;
    sk->ext.port = sp->ext.port;
#endif
    sk->proto = sp->proto;
    sk->af = sp->af;
    sk->direction = sp->direction;

    /* copy to state */
    memcpy(&s->id, &sp->id, sizeof (sp->id));
    s->creatorid = sp->creatorid;
    pf_state_peer_from_pfsync(&sp->src, &s->src);
    pf_state_peer_from_pfsync(&sp->dst, &s->dst);

    s->rule.ptr = &pf_default_rule;
    s->nat_rule.ptr = NULL;
    s->anchor.ptr = NULL;
    s->creation = pf_time_second();
    s->expire = pf_time_second();
    if (sp->expire > 0)
        s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
    s->packets[0] = s->packets[1] = 0;
    s->bytes[0] = s->bytes[1] = 0;
}
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
    MD5_CTX ctx;
    struct pf_rule *rule;
    int rs_cnt;
    u_int8_t digest[PF_MD5_DIGEST_LENGTH];

    MD5Init(&ctx);
    for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
        /* XXX PF_RULESET_SCRUB as well? */
        if (rs_cnt == PF_RULESET_SCRUB)
            continue;

        if (rs->rules[rs_cnt].inactive.ptr_array)
            _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
        rs->rules[rs_cnt].inactive.ptr_array = NULL;

        if (rs->rules[rs_cnt].inactive.rcount) {
            rs->rules[rs_cnt].inactive.ptr_array =
                _MALLOC(sizeof (caddr_t) *
                rs->rules[rs_cnt].inactive.rcount,
                M_TEMP, M_WAITOK);

            if (!rs->rules[rs_cnt].inactive.ptr_array)
                return (ENOMEM);
        }

        TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
            entries) {
            pf_hash_rule(&ctx, rule);
            (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
        }
    }

    MD5Final(digest, &ctx);
    memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
    return (0);
}
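/*
 * Main /dev/pf entry point.  Access is checked up front: the caller
 * must be superuser, a raised securelevel restricts most commands, and
 * the descriptor must be open for writing unless the command is
 * read-only.  The perimeter rwlock and pf_lock are then taken around
 * the actual work.
 */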
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
    struct pf_pooladdr *pa = NULL;
    struct pf_pool *pool = NULL;
    int error = 0;

    if (kauth_cred_issuser(kauth_cred_get()) == 0)
        return (EPERM);

    /* XXX keep in sync with switch() below */
    if (securelevel > 1)
        switch (cmd) {
        case DIOCSETSTATUSIF:
        case DIOCGETTIMEOUT:
        case DIOCCLRRULECTRS:
        case DIOCGETRULESETS:
        case DIOCGETRULESET:
        case DIOCRGETTABLES:
        case DIOCRGETTSTATS:
        case DIOCRCLRTSTATS:
        case DIOCRGETASTATS:
        case DIOCRCLRASTATS:
        case DIOCGETSRCNODES:
        case DIOCCLRSRCNODES:
        case DIOCIGETIFACES:
            break;
        case DIOCRCLRTABLES:
        case DIOCRADDTABLES:
        case DIOCRDELTABLES:
        case DIOCRSETTFLAGS:
            if (((struct pfioc_table *)addr)->pfrio_flags &
                PFR_FLAG_DUMMY)
                break; /* dummy operation ok */
            return (EPERM);
        default:
            return (EPERM);
        }

    if (!(flags & FWRITE))
        switch (cmd) {
        case DIOCGETTIMEOUT:
        case DIOCGETRULESETS:
        case DIOCGETRULESET:
        case DIOCRGETTABLES:
        case DIOCRGETTSTATS:
        case DIOCRGETASTATS:
        case DIOCGETSRCNODES:
        case DIOCIGETIFACES:
            break;
        case DIOCRCLRTABLES:
        case DIOCRADDTABLES:
        case DIOCRDELTABLES:
        case DIOCRCLRTSTATS:
        case DIOCRSETTFLAGS:
            if (((struct pfioc_table *)addr)->pfrio_flags &
                PFR_FLAG_DUMMY) {
                flags |= FWRITE; /* need write lock for dummy */
                break; /* dummy operation ok */
            }
            return (EACCES);
        default:
            return (EACCES);
        }

    if (((struct pfioc_rule *)addr)->action ==
        PF_GET_CLR_CNTR)
        lck_rw_lock_exclusive(pf_perim_lock);
    else
        lck_rw_lock_shared(pf_perim_lock);

    lck_mtx_lock(pf_lock);
    switch (cmd) {

    case DIOCSTART:
        if (pf_status.running) {
            error = EEXIST;
        } else if (pf_purge_thread == NULL) {
            error = ENOMEM;
        } else {
            pf_is_enabled = 1;
            pf_status.running = 1;
            pf_status.since = pf_calendar_time_second();
            if (pf_status.stateid == 0) {
                pf_status.stateid = pf_time_second();
                pf_status.stateid = pf_status.stateid << 32;
            }
            mbuf_growth_aggressive();
            wakeup(pf_purge_thread_fn);
            DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
        }
        break;

    case DIOCSTOP:
        if (!pf_status.running) {
            error = ENOENT;
        } else {
            mbuf_growth_normal();
            pf_status.running = 0;
            pf_is_enabled = 0;
            pf_status.since = pf_calendar_time_second();
            wakeup(pf_purge_thread_fn);
            DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
        }
        break;
    case DIOCADDRULE: {
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *rule, *tail;
        struct pf_pooladdr *apa;
        int rs_num;

        pr->anchor[sizeof (pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
            error = EINVAL;
            break;
        }
        if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
            error = EBUSY;
            break;
        }
        if (pr->pool_ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }
        rule = pool_get(&pf_rule_pl, PR_WAITOK);
        if (rule == NULL) {
            error = ENOMEM;
            break;
        }
        bcopy(&pr->rule, rule, sizeof (struct pf_rule));
        rule->cuid = kauth_cred_getuid(p->p_ucred);
        rule->cpid = p->p_pid;
        rule->anchor = NULL;
        rule->kif = NULL;
        TAILQ_INIT(&rule->rpool.list);
        /* initialize refcounting */
        rule->states = 0;
        rule->src_nodes = 0;
        rule->entries.tqe_prev = NULL;
#ifndef INET
        if (rule->af == AF_INET) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET */
#ifndef INET6
        if (rule->af == AF_INET6) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET6 */
        tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
            pf_rulequeue);
        if (tail)
            rule->nr = tail->nr + 1;
        else
            rule->nr = 0;
        if (rule->ifname[0]) {
            rule->kif = pfi_kif_get(rule->ifname);
            if (rule->kif == NULL) {
                pool_put(&pf_rule_pl, rule);
                error = EINVAL;
                break;
            }
            pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
        }

        if (rule->qname[0] != 0) {
            if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
                error = EBUSY;
            else if (rule->pqname[0] != 0) {
                if ((rule->pqid =
                    pf_qname2qid(rule->pqname)) == 0)
                    error = EBUSY;
            } else
                rule->pqid = rule->qid;
        }
        if (rule->tagname[0])
            if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
                error = EBUSY;
        if (rule->match_tagname[0])
            if ((rule->match_tag =
                pf_tagname2tag(rule->match_tagname)) == 0)
                error = EBUSY;
        if (rule->rt && !rule->direction)
            error = EINVAL;
        if (rule->logif >= PFLOGIFS_MAX)
            error = EINVAL;
        if (pf_rtlabel_add(&rule->src.addr) ||
            pf_rtlabel_add(&rule->dst.addr))
            error = EBUSY;
        if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
            error = EINVAL;
        if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
            error = EINVAL;
        if (pf_tbladdr_setup(ruleset, &rule->src.addr))
            error = EINVAL;
        if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
            error = EINVAL;
        if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
            error = EINVAL;
        TAILQ_FOREACH(apa, &pf_pabuf, entries)
            if (pf_tbladdr_setup(ruleset, &apa->addr))
                error = EINVAL;

        if (rule->overload_tblname[0]) {
            if ((rule->overload_tbl = pfr_attach_table(ruleset,
                rule->overload_tblname)) == NULL)
                error = EINVAL;
            else
                rule->overload_tbl->pfrkt_flags |=
                    PFR_TFLAG_ACTIVE;
        }

        pf_mv_pool(&pf_pabuf, &rule->rpool.list);
        if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
            (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
            (rule->rt > PF_FASTROUTE)) &&
            (TAILQ_FIRST(&rule->rpool.list) == NULL))
            error = EINVAL;

        if (error) {
            pf_rm_rule(NULL, rule);
            break;
        }
        rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
        rule->evaluations = rule->packets[0] = rule->packets[1] =
            rule->bytes[0] = rule->bytes[1] = 0;
        TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
            rule, entries);
        ruleset->rules[rs_num].inactive.rcount++;
        break;
    }
    case DIOCGETRULES: {
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *tail;
        int rs_num;

        pr->anchor[sizeof (pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
            pf_rulequeue);
        if (tail)
            pr->nr = tail->nr + 1;
        else
            pr->nr = 0;
        pr->ticket = ruleset->rules[rs_num].active.ticket;
        break;
    }
    case DIOCGETRULE: {
        struct pfioc_rule *pr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *rule;
        int rs_num, i;

        pr->anchor[sizeof (pr->anchor) - 1] = 0;
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
            error = EBUSY;
            break;
        }
        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        while ((rule != NULL) && (rule->nr != pr->nr))
            rule = TAILQ_NEXT(rule, entries);
        if (rule == NULL) {
            error = EBUSY;
            break;
        }
        bcopy(rule, &pr->rule, sizeof (struct pf_rule));
        if (pf_anchor_copyout(ruleset, rule, pr)) {
            error = EBUSY;
            break;
        }
        pfi_dynaddr_copyout(&pr->rule.src.addr);
        pfi_dynaddr_copyout(&pr->rule.dst.addr);
        pf_tbladdr_copyout(&pr->rule.src.addr);
        pf_tbladdr_copyout(&pr->rule.dst.addr);
        pf_rtlabel_copyout(&pr->rule.src.addr);
        pf_rtlabel_copyout(&pr->rule.dst.addr);
        for (i = 0; i < PF_SKIP_COUNT; ++i)
            if (rule->skip[i].ptr == NULL)
                pr->rule.skip[i].nr = -1;
            else
                pr->rule.skip[i].nr =
                    rule->skip[i].ptr->nr;

        if (pr->action == PF_GET_CLR_CNTR) {
            rule->evaluations = 0;
            rule->packets[0] = rule->packets[1] = 0;
            rule->bytes[0] = rule->bytes[1] = 0;
        }
        break;
    }
    case DIOCCHANGERULE: {
        struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
        struct pf_ruleset *ruleset;
        struct pf_rule *oldrule = NULL, *newrule = NULL;
        u_int32_t nr = 0;
        int rs_num;

        if (!(pcr->action == PF_CHANGE_REMOVE ||
            pcr->action == PF_CHANGE_GET_TICKET) &&
            pcr->pool_ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }

        if (pcr->action < PF_CHANGE_ADD_HEAD ||
            pcr->action > PF_CHANGE_GET_TICKET) {
            error = EINVAL;
            break;
        }
        ruleset = pf_find_ruleset(pcr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pcr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }

        if (pcr->action == PF_CHANGE_GET_TICKET) {
            pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
            break;
        } else if (pcr->ticket !=
            ruleset->rules[rs_num].active.ticket) {
            error = EINVAL;
            break;
        }
        if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
            error = EINVAL;
            break;
        }

        if (pcr->action != PF_CHANGE_REMOVE) {
            newrule = pool_get(&pf_rule_pl, PR_WAITOK);
            if (newrule == NULL) {
                error = ENOMEM;
                break;
            }
            bcopy(&pcr->rule, newrule, sizeof (struct pf_rule));
            newrule->cuid = kauth_cred_getuid(p->p_ucred);
            newrule->cpid = p->p_pid;
            TAILQ_INIT(&newrule->rpool.list);
            /* initialize refcounting */
            newrule->states = 0;
            newrule->entries.tqe_prev = NULL;
#ifndef INET
            if (newrule->af == AF_INET) {
                pool_put(&pf_rule_pl, newrule);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET */
#ifndef INET6
            if (newrule->af == AF_INET6) {
                pool_put(&pf_rule_pl, newrule);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET6 */
            if (newrule->ifname[0]) {
                newrule->kif = pfi_kif_get(newrule->ifname);
                if (newrule->kif == NULL) {
                    pool_put(&pf_rule_pl, newrule);
                    error = EINVAL;
                    break;
                }
                pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
            } else
                newrule->kif = NULL;

            if (newrule->qname[0] != 0) {
                if ((newrule->qid =
                    pf_qname2qid(newrule->qname)) == 0)
                    error = EBUSY;
                else if (newrule->pqname[0] != 0) {
                    if ((newrule->pqid =
                        pf_qname2qid(newrule->pqname)) == 0)
                        error = EBUSY;
                } else
                    newrule->pqid = newrule->qid;
            }
            if (newrule->tagname[0])
                if ((newrule->tag =
                    pf_tagname2tag(newrule->tagname)) == 0)
                    error = EBUSY;
            if (newrule->match_tagname[0])
                if ((newrule->match_tag = pf_tagname2tag(
                    newrule->match_tagname)) == 0)
                    error = EBUSY;
            if (newrule->rt && !newrule->direction)
                error = EINVAL;
            if (newrule->logif >= PFLOGIFS_MAX)
                error = EINVAL;
            if (pf_rtlabel_add(&newrule->src.addr) ||
                pf_rtlabel_add(&newrule->dst.addr))
                error = EBUSY;
            if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
                error = EINVAL;
            if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
                error = EINVAL;
            if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
                error = EINVAL;
            if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
                error = EINVAL;
            if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
                error = EINVAL;
            TAILQ_FOREACH(pa, &pf_pabuf, entries)
                if (pf_tbladdr_setup(ruleset, &pa->addr))
                    error = EINVAL;

            if (newrule->overload_tblname[0]) {
                if ((newrule->overload_tbl = pfr_attach_table(
                    ruleset, newrule->overload_tblname)) ==
                    NULL)
                    error = EINVAL;
                else
                    newrule->overload_tbl->pfrkt_flags |=
                        PFR_TFLAG_ACTIVE;
            }

            pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
            if (((((newrule->action == PF_NAT) ||
                (newrule->action == PF_RDR) ||
                (newrule->action == PF_BINAT) ||
                (newrule->rt > PF_FASTROUTE)) &&
                !newrule->anchor)) &&
                (TAILQ_FIRST(&newrule->rpool.list) == NULL))
                error = EINVAL;

            if (error) {
                pf_rm_rule(NULL, newrule);
                break;
            }
            newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
            newrule->evaluations = 0;
            newrule->packets[0] = newrule->packets[1] = 0;
            newrule->bytes[0] = newrule->bytes[1] = 0;
        }
        pf_empty_pool(&pf_pabuf);

        if (pcr->action == PF_CHANGE_ADD_HEAD)
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
        else if (pcr->action == PF_CHANGE_ADD_TAIL)
            oldrule = TAILQ_LAST(
                ruleset->rules[rs_num].active.ptr, pf_rulequeue);
        else {
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
            while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
                oldrule = TAILQ_NEXT(oldrule, entries);
            if (oldrule == NULL) {
                if (newrule != NULL)
                    pf_rm_rule(NULL, newrule);
                error = EINVAL;
                break;
            }
        }

        if (pcr->action == PF_CHANGE_REMOVE) {
            pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
            ruleset->rules[rs_num].active.rcount--;
        } else {
            if (oldrule == NULL)
                TAILQ_INSERT_TAIL(
                    ruleset->rules[rs_num].active.ptr,
                    newrule, entries);
            else if (pcr->action == PF_CHANGE_ADD_HEAD ||
                pcr->action == PF_CHANGE_ADD_BEFORE)
                TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
            else
                TAILQ_INSERT_AFTER(
                    ruleset->rules[rs_num].active.ptr,
                    oldrule, newrule, entries);
            ruleset->rules[rs_num].active.rcount++;
        }

        nr = 0;
        TAILQ_FOREACH(oldrule,
            ruleset->rules[rs_num].active.ptr, entries)
            oldrule->nr = nr++;

        ruleset->rules[rs_num].active.ticket++;

        pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
        pf_remove_if_empty_ruleset(ruleset);
        break;
    }
    case DIOCCLRSTATES: {
        struct pf_state *s, *nexts;
        struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
        int killed = 0;

        for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

            if (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
                s->kif->pfik_name) == 0) {
                /* don't send out individual delete messages */
                s->sync_flags = PFSTATE_NOSYNC;
                pf_unlink_state(s);
                killed++;
            }
        }
        psk->psk_af = killed;
        pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
        break;
    }
    case DIOCKILLSTATES: {
        struct pf_state *s, *nexts;
        struct pf_state_key *sk;
        struct pf_state_host *src, *dst;
        struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
        int killed = 0;

        for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
            s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
            sk = s->state_key;

            if (sk->direction == PF_OUT) {
                src = &sk->lan;
                dst = &sk->ext;
            } else {
                src = &sk->ext;
                dst = &sk->lan;
            }
            if ((!psk->psk_af || sk->af == psk->psk_af) &&
                (!psk->psk_proto || psk->psk_proto == sk->proto) &&
                PF_MATCHA(psk->psk_src.neg,
                &psk->psk_src.addr.v.a.addr,
                &psk->psk_src.addr.v.a.mask,
                &src->addr, sk->af) &&
                PF_MATCHA(psk->psk_dst.neg,
                &psk->psk_dst.addr.v.a.addr,
                &psk->psk_dst.addr.v.a.mask,
                &dst->addr, sk->af) &&
#ifndef NO_APPLE_EXTENSIONS
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_src.xport,
                &src->xport)) &&
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_dst.xport,
                &dst->xport)) &&
#else
                (psk->psk_src.port_op == 0 ||
                pf_match_port(psk->psk_src.port_op,
                psk->psk_src.port[0], psk->psk_src.port[1],
                src->port)) &&
                (psk->psk_dst.port_op == 0 ||
                pf_match_port(psk->psk_dst.port_op,
                psk->psk_dst.port[0], psk->psk_dst.port[1],
                dst->port)) &&
#endif
                (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
                s->kif->pfik_name) == 0)) {
                /* send immediate delete of state */
                pfsync_delete_state(s);
                s->sync_flags |= PFSTATE_NOSYNC;
                pf_unlink_state(s);
                killed++;
            }
        }
        psk->psk_af = killed;
        break;
    }
    case DIOCADDSTATE: {
        struct pfioc_state *ps = (struct pfioc_state *)addr;
        struct pfsync_state *sp = &ps->state;
        struct pf_state *s;
        struct pf_state_key *sk;
        struct pfi_kif *kif;

        if (sp->timeout >= PFTM_MAX &&
            sp->timeout != PFTM_UNTIL_PACKET) {
            error = EINVAL;
            break;
        }
        s = pool_get(&pf_state_pl, PR_WAITOK);
        if (s == NULL) {
            error = ENOMEM;
            break;
        }
        bzero(s, sizeof (struct pf_state));
        if ((sk = pf_alloc_state_key(s)) == NULL) {
            pool_put(&pf_state_pl, s);
            error = ENOMEM;
            break;
        }
        pf_state_import(sp, sk, s);
        kif = pfi_kif_get(sp->ifname);
        if (kif == NULL) {
            pool_put(&pf_state_pl, s);
            pool_put(&pf_state_key_pl, sk);
            error = ENOENT;
            break;
        }
#ifndef NO_APPLE_EXTENSIONS
        TAILQ_INIT(&s->unlink_hooks);
        s->state_key->app_state = 0;
#endif
        if (pf_insert_state(kif, s)) {
            pfi_kif_unref(kif, PFI_KIF_REF_NONE);
            pool_put(&pf_state_pl, s);
            error = EEXIST;
            break;
        }
        pf_default_rule.states++;
        VERIFY(pf_default_rule.states != 0);
        break;
    }
    case DIOCGETSTATE: {
        struct pfioc_state *ps = (struct pfioc_state *)addr;
        struct pf_state *s;
        struct pf_state_cmp id_key;

        bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
        id_key.creatorid = ps->state.creatorid;

        s = pf_find_state_byid(&id_key);
        if (s == NULL) {
            error = ENOENT;
            break;
        }

        pf_state_export(&ps->state, s->state_key, s);
        break;
    }
    case DIOCGETSTATES: {
        struct pfioc_states *ps = (struct pfioc_states *)addr;
        struct pf_state *state;
        struct pfsync_state *pstore;
        user_addr_t buf;
        u_int32_t nr = 0;

        if (ps->ps_len == 0) {
            nr = pf_status.states;
            ps->ps_len = sizeof (struct pfsync_state) * nr;
            break;
        }

        pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
        buf = PF_USER_ADDR(addr, pfioc_states, ps_buf);

        state = TAILQ_FIRST(&state_list);
        while (state) {
            if (state->timeout != PFTM_UNLINKED) {
                if ((nr + 1) * sizeof (*pstore) >
                    (unsigned)ps->ps_len)
                    break;

                pf_state_export(pstore,
                    state->state_key, state);
                error = copyout(pstore, buf, sizeof (*pstore));
                if (error) {
                    _FREE(pstore, M_TEMP);
                    goto fail;
                }
                buf += sizeof (*pstore);
                nr++;
            }
            state = TAILQ_NEXT(state, entry_list);
        }

        ps->ps_len = sizeof (struct pfsync_state) * nr;

        _FREE(pstore, M_TEMP);
        break;
    }
    case DIOCGETSTATUS: {
        struct pf_status *s = (struct pf_status *)addr;
        bcopy(&pf_status, s, sizeof (struct pf_status));
        pfi_update_status(s->ifname, s);
        break;
    }
    case DIOCSETSTATUSIF: {
        struct pfioc_if *pi = (struct pfioc_if *)addr;

        if (pi->ifname[0] == 0) {
            bzero(pf_status.ifname, IFNAMSIZ);
            break;
        }
        strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
        break;
    }
    case DIOCCLRSTATUS: {
        bzero(pf_status.counters, sizeof (pf_status.counters));
        bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
        bzero(pf_status.scounters, sizeof (pf_status.scounters));
        pf_status.since = pf_calendar_time_second();
        if (*pf_status.ifname)
            pfi_update_status(pf_status.ifname, NULL);
        break;
    }

    case DIOCNATLOOK: {
        struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
        struct pf_state_key *sk;
        struct pf_state *state;
        struct pf_state_key_cmp key;
        int m = 0, direction = pnl->direction;

        key.af = pnl->af;
        key.proto = pnl->proto;
#ifndef NO_APPLE_EXTENSIONS
        key.proto_variant = pnl->proto_variant;
#endif

        if (!pnl->proto ||
            PF_AZERO(&pnl->saddr, pnl->af) ||
            PF_AZERO(&pnl->daddr, pnl->af) ||
            ((pnl->proto == IPPROTO_TCP ||
            pnl->proto == IPPROTO_UDP) &&
#ifndef NO_APPLE_EXTENSIONS
            (!pnl->dxport.port || !pnl->sxport.port)))
#else
            (!pnl->dport || !pnl->sport)))
#endif
            error = EINVAL;
        else {
            /*
             * userland gives us source and dest of connection,
             * reverse the lookup so we ask for what happens with
             * the return traffic, enabling us to find it in the
             * state tree.
             */
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                memcpy(&key.ext.xport, &pnl->dxport,
                    sizeof (key.ext.xport));
#else
                key.ext.port = pnl->dport;
#endif
                PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                memcpy(&key.gwy.xport, &pnl->sxport,
                    sizeof (key.gwy.xport));
#else
                key.gwy.port = pnl->sport;
#endif
                state = pf_find_state_all(&key, PF_IN, &m);
            } else {
                PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                memcpy(&key.lan.xport, &pnl->dxport,
                    sizeof (key.lan.xport));
#else
                key.lan.port = pnl->dport;
#endif
                PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                memcpy(&key.ext.xport, &pnl->sxport,
                    sizeof (key.ext.xport));
#else
                key.ext.port = pnl->sport;
#endif
                state = pf_find_state_all(&key, PF_OUT, &m);
            }

            if (m > 1)
                error = E2BIG;  /* more than one state */
            else if (state != NULL) {
                sk = state->state_key;
                if (direction == PF_IN) {
                    PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
                        sk->af);
#ifndef NO_APPLE_EXTENSIONS
                    memcpy(&pnl->rsxport, &sk->lan.xport,
                        sizeof (pnl->rsxport));
#else
                    pnl->rsport = sk->lan.port;
#endif
                    PF_ACPY(&pnl->rdaddr, &pnl->daddr,
                        pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                    memcpy(&pnl->rdxport, &pnl->dxport,
                        sizeof (pnl->rdxport));
#else
                    pnl->rdport = pnl->dport;
#endif
                } else {
                    PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
                        sk->af);
#ifndef NO_APPLE_EXTENSIONS
                    memcpy(&pnl->rdxport, &sk->gwy.xport,
                        sizeof (pnl->rdxport));
#else
                    pnl->rdport = sk->gwy.port;
#endif
                    PF_ACPY(&pnl->rsaddr, &pnl->saddr,
                        pnl->af);
#ifndef NO_APPLE_EXTENSIONS
                    memcpy(&pnl->rsxport, &pnl->sxport,
                        sizeof (pnl->rsxport));
#else
                    pnl->rsport = pnl->sport;
#endif
                }
            } else
                error = ENOENT;
        }
        break;
    }
    case DIOCSETTIMEOUT: {
        struct pfioc_tm *pt = (struct pfioc_tm *)addr;
        int old;

        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
            pt->seconds < 0) {
            error = EINVAL;
            goto fail;
        }
        old = pf_default_rule.timeout[pt->timeout];
        if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
            pt->seconds = 1;
        pf_default_rule.timeout[pt->timeout] = pt->seconds;
        if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
            wakeup(pf_purge_thread_fn);
        pt->seconds = old;
        break;
    }
    case DIOCGETTIMEOUT: {
        struct pfioc_tm *pt = (struct pfioc_tm *)addr;

        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
            error = EINVAL;
            goto fail;
        }
        pt->seconds = pf_default_rule.timeout[pt->timeout];
        break;
    }
    case DIOCGETLIMIT: {
        struct pfioc_limit *pl = (struct pfioc_limit *)addr;

        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
            error = EINVAL;
            goto fail;
        }
        pl->limit = pf_pool_limits[pl->index].limit;
        break;
    }
    case DIOCSETLIMIT: {
        struct pfioc_limit *pl = (struct pfioc_limit *)addr;
        int old_limit;

        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
            pf_pool_limits[pl->index].pp == NULL) {
            error = EINVAL;
            goto fail;
        }
        pool_sethardlimit(pf_pool_limits[pl->index].pp,
            pl->limit, NULL, 0);
        old_limit = pf_pool_limits[pl->index].limit;
        pf_pool_limits[pl->index].limit = pl->limit;
        pl->limit = old_limit;
        break;
    }
    case DIOCSETDEBUG: {
        u_int32_t *level = (u_int32_t *)addr;

        pf_status.debug = *level;
        break;
    }
    case DIOCCLRRULECTRS: {
        /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
        struct pf_ruleset *ruleset = &pf_main_ruleset;
        struct pf_rule *rule;

        TAILQ_FOREACH(rule,
            ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
            rule->evaluations = 0;
            rule->packets[0] = rule->packets[1] = 0;
            rule->bytes[0] = rule->bytes[1] = 0;
        }
        break;
    }
    case DIOCSTARTALTQ: {
        struct pf_altq *altq;

        /* enable all altq interfaces on active list */
        TAILQ_FOREACH(altq, pf_altqs_active, entries) {
            if (altq->qname[0] == 0) {
                error = pf_enable_altq(altq);
                if (error != 0)
                    break;
            }
        }
        if (error == 0)
            pf_altq_running = 1;
        DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
        break;
    }
    case DIOCSTOPALTQ: {
        struct pf_altq *altq;

        /* disable all altq interfaces on active list */
        TAILQ_FOREACH(altq, pf_altqs_active, entries) {
            if (altq->qname[0] == 0) {
                error = pf_disable_altq(altq);
                if (error != 0)
                    break;
            }
        }
        if (error == 0)
            pf_altq_running = 0;
        DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
        break;
    }

    case DIOCADDALTQ: {
        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq, *a;

        if (pa->ticket != ticket_altqs_inactive) {
            error = EBUSY;
            break;
        }
        altq = pool_get(&pf_altq_pl, PR_WAITOK);
        if (altq == NULL) {
            error = ENOMEM;
            break;
        }
        bcopy(&pa->altq, altq, sizeof (struct pf_altq));

        /*
         * if this is for a queue, find the discipline and
         * copy the necessary fields
         */
        if (altq->qname[0] != 0) {
            if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
                error = EBUSY;
                pool_put(&pf_altq_pl, altq);
                break;
            }
            altq->altq_disc = NULL;
            TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
                if (strncmp(a->ifname, altq->ifname,
                    IFNAMSIZ) == 0 && a->qname[0] == 0) {
                    altq->altq_disc = a->altq_disc;
                    break;
                }
            }
        }

        error = altq_add(altq);
        if (error) {
            pool_put(&pf_altq_pl, altq);
            break;
        }

        TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
        bcopy(altq, &pa->altq, sizeof (struct pf_altq));
        break;
    }
    case DIOCGETALTQS: {
        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq;

        pa->nr = 0;
        TAILQ_FOREACH(altq, pf_altqs_active, entries)
            pa->nr++;
        pa->ticket = ticket_altqs_active;
        break;
    }

    case DIOCGETALTQ: {
        struct pfioc_altq *pa = (struct pfioc_altq *)addr;
        struct pf_altq *altq;
        u_int32_t nr;

        if (pa->ticket != ticket_altqs_active) {
            error = EBUSY;
            break;
        }
        nr = 0;
        altq = TAILQ_FIRST(pf_altqs_active);
        while ((altq != NULL) && (nr < pa->nr)) {
            altq = TAILQ_NEXT(altq, entries);
            nr++;
        }
        if (altq == NULL) {
            error = EBUSY;
            break;
        }
        bcopy(altq, &pa->altq, sizeof (struct pf_altq));
        break;
    }
    case DIOCCHANGEALTQ:
        /* CHANGEALTQ not supported yet! */
        error = ENODEV;
        break;
    case DIOCGETQSTATS: {
        struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
        struct pf_altq *altq;
        u_int32_t nr;
        int nbytes;

        if (pq->ticket != ticket_altqs_active) {
            error = EBUSY;
            break;
        }
        nbytes = pq->nbytes;
        nr = 0;
        altq = TAILQ_FIRST(pf_altqs_active);
        while ((altq != NULL) && (nr < pq->nr)) {
            altq = TAILQ_NEXT(altq, entries);
            nr++;
        }
        if (altq == NULL) {
            error = EBUSY;
            break;
        }
        error = altq_getqstats(altq, pq->buf, &nbytes);
        if (error == 0) {
            pq->scheduler = altq->scheduler;
            pq->nbytes = nbytes;
        }
        break;
    }
    case DIOCBEGINADDRS: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

        pf_empty_pool(&pf_pabuf);
        pp->ticket = ++ticket_pabuf;
        break;
    }

    case DIOCADDADDR: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

        if (pp->ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }
#ifndef INET
        if (pp->af == AF_INET) {
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET */
#ifndef INET6
        if (pp->af == AF_INET6) {
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET6 */
        if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
            pp->addr.addr.type != PF_ADDR_DYNIFTL &&
            pp->addr.addr.type != PF_ADDR_TABLE) {
            error = EINVAL;
            break;
        }
        pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
        if (pa == NULL) {
            error = ENOMEM;
            break;
        }
        bcopy(&pp->addr, pa, sizeof (struct pf_pooladdr));
        if (pa->ifname[0]) {
            pa->kif = pfi_kif_get(pa->ifname);
            if (pa->kif == NULL) {
                pool_put(&pf_pooladdr_pl, pa);
                error = EINVAL;
                break;
            }
            pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
        }
        if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
            pfi_dynaddr_remove(&pa->addr);
            pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
            pool_put(&pf_pooladdr_pl, pa);
            error = EINVAL;
            break;
        }
        TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
        break;
    }
    case DIOCGETADDRS: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

        pp->nr = 0;
        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 0);
        if (pool == NULL) {
            error = EBUSY;
            break;
        }
        TAILQ_FOREACH(pa, &pool->list, entries)
            pp->nr++;
        break;
    }

    case DIOCGETADDR: {
        struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
        u_int32_t nr = 0;

        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 1);
        if (pool == NULL) {
            error = EBUSY;
            break;
        }
        pa = TAILQ_FIRST(&pool->list);
        while ((pa != NULL) && (nr < pp->nr)) {
            pa = TAILQ_NEXT(pa, entries);
            nr++;
        }
        if (pa == NULL) {
            error = EBUSY;
            break;
        }
        bcopy(pa, &pp->addr, sizeof (struct pf_pooladdr));
        pfi_dynaddr_copyout(&pp->addr.addr);
        pf_tbladdr_copyout(&pp->addr.addr);
        pf_rtlabel_copyout(&pp->addr.addr);
        break;
    }

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr	*oldpa = NULL, *newpa = NULL;
		struct pf_ruleset	*ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			bcopy(&pca->addr, newpa, sizeof (struct pf_pooladdr));
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* !INET */
#if !INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* !INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int	i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}
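
	/*
	 * Summary of the action dispatch above: ADD_HEAD/ADD_TAIL pick the
	 * insertion neighbor directly, ADD_BEFORE/ADD_AFTER first locate
	 * the pca->nr'th entry, and REMOVE unlinks and frees it.  In every
	 * mutating case the round-robin cursor (pool->cur) is reset to the
	 * list head and the counter re-primed from the first address, so
	 * address rotation restarts deterministically after any edit.
	 */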

	case DIOCGETRULESETS: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;

		pr->path[sizeof (pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset	*ruleset;
		struct pf_anchor	*anchor;
		u_int32_t		 nr = 0;

		pr->path[sizeof (pr->path) - 1] = 0;
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
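
	/*
	 * DIOCGETRULESETS/DIOCGETRULESET mirror the count-then-index
	 * pattern used for the queue ioctls: count the children of the
	 * anchor named by pr->path, then fetch child names one index at a
	 * time.  Illustrative userland walk (a sketch, not part of this
	 * file; fields as declared in pfvar.h; error handling elided):
	 *
	 *	struct pfioc_ruleset pr;
	 *	u_int32_t i, count;
	 *	bzero(&pr, sizeof (pr));	// empty path = main ruleset
	 *	ioctl(dev, DIOCGETRULESETS, &pr);
	 *	count = pr.nr;
	 *	for (i = 0; i < count; i++) {
	 *		pr.nr = i;		// nr doubles as the index
	 *		ioctl(dev, DIOCGETRULESET, &pr);
	 *		printf("%s\n", pr.name);
	 *	}
	 */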

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(buf, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(buf, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
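
	/*
	 * Note on the repeated preamble in these table ioctls: pfrio_esize
	 * is the caller's sizeof() for one array element, checked against
	 * the kernel's struct so a mismatched pfvar.h is rejected with
	 * ENODEV instead of being mis-parsed.  PF_USER_ADDR recovers the
	 * user-space address of the pfrio_buffer field regardless of
	 * whether the caller is a 32-bit or 64-bit process, and the
	 * pfr_*() backends then copyin()/copyout() the element array
	 * through that user_addr_t.
	 */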

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tables(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_tstats(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(buf, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(buf, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_addrs(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		error = pfr_get_astats(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_astats(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_tst_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		error = pfr_ina_define(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		user_addr_t		 buf;
		int			 i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_trans, array);
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}
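
	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement an all-or-nothing
	 * reload across rulesets, tables and (where built) ALTQ: begin
	 * opens an inactive copy per element and returns one ticket each,
	 * and commit re-validates every ticket before swapping anything
	 * in, so a racing configurator produces EBUSY instead of a
	 * half-applied configuration.  A hedged sketch of the userland
	 * side (illustrative only; see pfctl for the real thing):
	 *
	 *	struct pfioc_trans_e ioe[2] = {
	 *		{ .rs_num = PF_RULESET_FILTER, .anchor = "" },
	 *		{ .rs_num = PF_RULESET_TABLE,  .anchor = "" },
	 *	};
	 *	struct pfioc_trans io = {
	 *		.size = 2, .esize = sizeof (ioe[0]), .array = ioe,
	 *	};
	 *	ioctl(dev, DIOCXBEGIN, &io);	// fills ioe[i].ticket
	 *	// ... load rules/tables into the inactive sets ...
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		ioctl(dev, DIOCXROLLBACK, &io);
	 */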

	case DIOCXROLLBACK: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		user_addr_t		 buf;
		int			 i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_trans, array);
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e	*ioe;
		struct pfr_table	*table;
		struct pf_ruleset	*rs;
		user_addr_t		 _buf, buf;
		int			 i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = _buf = PF_USER_ADDR(addr, pfioc_trans, array);
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if ALTQ
				if ((error = pf_commit_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCGETSRCNODES: {
		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node	*n, *pstore;
		user_addr_t		 buf;
		u_int32_t		 nr = 0;
		int			 space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof (struct pf_src_node) * nr;
			break;
		}

		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_src_nodes, psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			if ((nr + 1) * sizeof (*pstore) >
			    (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}
		psn->psn_len = sizeof (struct pf_src_node) * nr;

		_FREE(pstore, M_TEMP);
		break;
	}
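
	/*
	 * The conn_rate adjustment above reports the portion of the rate
	 * window still charged against the node: with diff seconds elapsed
	 * since conn_rate.last, the count is reduced by
	 * count * diff / seconds (linear decay) and zeroed once the full
	 * window has passed.  E.g. 30 counted connections over a 10s
	 * window, queried 4s after the last update, copy out as
	 * 30 - 30*4/10 = 18.
	 */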

	case DIOCCLRSRCNODES: {
		struct pf_src_node	*n;
		struct pf_state		*state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node	*sn;
		struct pf_state		*s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int			 killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_af = killed;
		break;
	}

	case DIOCSETHOSTID: {
		u_int32_t	*hid = (u_int32_t *)addr;

		if (*hid == 0)
			pf_status.hostid = random();
		else
			pf_status.hostid = *hid;
		break;
	}

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_iface, pfiio_buffer);

		/* esize must be that of the user space version of pfi_kif */
		if (io->pfiio_esize != sizeof (struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_get_ifaces(io->pfiio_name, buf, &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	return (error);
}

int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input)
{
	int error = 0, reentry;
	struct thread *curthread = current_thread();
	struct mbuf *nextpkt;

	reentry = (ifp->if_pf_curthread == curthread);
	if (!reentry) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled)
			goto done;

		lck_mtx_lock(pf_lock);
		ifp->if_pf_curthread = curthread;
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	switch (af) {
#if INET
	case AF_INET:
		error = pf_inet_hook(ifp, mp, input);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(ifp, mp, input);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* reattach any packets that trailed the one we filtered */
	if (nextpkt != NULL) {
		if (*mp != NULL) {
			struct mbuf *m = *mp;
			while (m->m_nextpkt != NULL)
				m = m->m_nextpkt;
			m->m_nextpkt = nextpkt;
		} else
			*mp = nextpkt;
	}
	if (mppn != NULL && *mppn != NULL)
		*mppn = *mp;

	if (!reentry) {
		ifp->if_pf_curthread = NULL;
		lck_mtx_unlock(pf_lock);
	}
done:
	if (!reentry)
		lck_rw_done(pf_perim_lock);

	return (error);
}
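
/*
 * pf_af_hook() may be entered recursively on the same thread, e.g. when
 * pf_test() synthesizes and transmits a packet (an RST or ICMP error)
 * while PF's locks are already held.  The if_pf_curthread marker makes
 * that re-entry cheap to detect: the inner invocation skips lock
 * acquisition entirely, and the outer frame remains the sole owner and
 * releaser of pf_perim_lock and pf_lock.
 */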

static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = 0;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
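
/*
 * Note on the byte swaps above: on little-endian systems the ip_len and
 * ip_off fields are swapped around pf_test(), reconciling the
 * representation used by the caller with the one pf_test() expects, and
 * the ip pointer is re-fetched on the way out because pf_test() may have
 * exchanged the mbuf (normalization can reallocate the chain).
 */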

static int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	int error = 0;

	/*
	 * TODO: once we support IPv6 hardware checksum offload
	 */
	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = 0;
		}
	}
	return (error);
}

void
pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd)
{
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
#if INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6:
#endif /* INET6 */
		if (ifp->if_pf_kif != NULL)
			pfi_kifaddr_update(ifp->if_pf_kif);
		break;
	default:
		panic("%s: unexpected ioctl %lu", __func__, cmd);
		/* NOTREACHED */
	}
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
}

/*
 * Caller acquires dlil lock as writer (exclusive)
 */
void
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	if (attach)
		pfi_attach_ifnet(ifp);
	else
		pfi_detach_ifnet(ifp);
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
}
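
/*
 * Lock ordering note, as visible in the hooks above: every entry point
 * in this file takes pf_perim_lock (reader-writer) before pf_lock
 * (mutex).  The perimeter lock fences reconfiguration against the data
 * path, while pf_lock protects the PF data structures themselves.
 */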

void
pf_attach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	/*
	 * Check against ifnet_addrs[] before proceeding, in case this
	 * is called very early on, e.g. during dlil_init() before any
	 * network interface is attached.
	 */
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL)
				pfi_attach_ifnet(ifp);
		}
	}
	ifnet_head_done();
}

/* currently unused along with pfdetach() */
void
pf_detach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}