/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/mcache.h>
#include <sys/queue.h>

#include <mach/vm_param.h>

#include <net/if_types.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#include <libkern/crypto/md5.h>

#include <machine/machine_routines.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>

#include <net/if_pfsync.h>

#include <net/if_pflog.h>

#include <netinet/ip6.h>
#include <netinet/in_pcb.h>

#include <dev/random/randomdev.h>
static void pfdetach(void);
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
    struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
    struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
    struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
    struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *,
    struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
    struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
    struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
    struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
    struct pfioc_iface_64 *, struct proc *);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
    int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
    int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
    struct pf_ruleset *);
static void pf_delete_rule_by_owner(char *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
    int, struct pf_rule **);
#define	PF_CDEV_MAJOR	(-1)

static struct cdevsw pf_cdevsw = {
	/* open */	pfopen,
	/* close */	pfclose,
	/* read */	eno_rdwrt,
	/* write */	eno_rdwrt,
	/* ioctl */	pfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	eno_select,
	/* mmap */	eno_mmap,
	/* strategy */	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};

static void pf_attach_hooks(void);
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

u_int32_t pf_hash_seed;
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;
static u_int64_t pffwrules;
static u_int32_t pfdevcnt;

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule		 pf_default_rule;

#define	TAGID_MAX	 50000
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
static int		 pf_rtlabel_add(struct pf_addr_wrap *);
static void		 pf_rtlabel_remove(struct pf_addr_wrap *);
static void		 pf_rtlabel_copyout(struct pf_addr_wrap *);

static int		 pf_inet_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
static int		 pf_inet6_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);

#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
#define	PFIOCX_STRUCT_DECL(s)						\
struct {								\
	union {								\
		struct s##_32	_s##_32;				\
		struct s##_64	_s##_64;				\
	} _u;								\
} *s##_un = NULL

#define	PFIOCX_STRUCT_BEGIN(a, s, _action) {				\
	VERIFY(s##_un == NULL);						\
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);	\
	if (s##_un == NULL) {						\
		_action							\
	} else {							\
		if (p64)						\
			bcopy(a, &s##_un->_u._s##_64,			\
			    sizeof (struct s##_64));			\
		else							\
			bcopy(a, &s##_un->_u._s##_32,			\
			    sizeof (struct s##_32));			\
	}								\
}

#define	PFIOCX_STRUCT_END(s, a) {					\
	VERIFY(s##_un != NULL);						\
	if (p64)							\
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));	\
	else								\
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));	\
	_FREE(s##_un, M_TEMP);						\
	s##_un = NULL;							\
}

#define	PFIOCX_STRUCT_ADDR32(s)	(&s##_un->_u._s##_32)
#define	PFIOCX_STRUCT_ADDR64(s)	(&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
#define	PFIOC_STRUCT_BEGIN(a, v, _action) {				\
	VERIFY((v) == NULL);						\
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);		\
	if ((v) == NULL) {						\
		_action							\
	} else {							\
		bcopy(a, v, sizeof (*(v)));				\
	}								\
}

#define	PFIOC_STRUCT_END(v, a) {					\
	VERIFY((v) != NULL);						\
	bcopy(v, a, sizeof (*(v)));					\
	_FREE(v, M_TEMP);						\
	(v) = NULL;							\
}

#define	PFIOC_STRUCT_ADDR32(s)	(&s##_un->_u._s##_32)
#define	PFIOC_STRUCT_ADDR64(s)	(&s##_un->_u._s##_64)
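/*
 * Typical use of the variable-size helpers, as it appears in pfioctl()
 * below (sketch only): DECL names a local union large enough for either
 * layout, BEGIN copies the user structure in according to the caller's
 * p64 flag, and END copies it back out and frees the allocation.
 *
 *	PFIOCX_STRUCT_DECL(pfioc_table);
 *
 *	PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
 *	error = pfioctl_ioc_table(cmd,
 *	    PFIOCX_STRUCT_ADDR32(pfioc_table),
 *	    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
 *	PFIOCX_STRUCT_END(pfioc_table, addr);
 */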
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;
extern void pfi_kifaddr_update(void *);

/* pf enable ref-counting helper functions */
static u_int64_t generate_token(struct proc *);
static int remove_token(struct pfioc_remove_token *);
static void invalidate_all_tokens(void);
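/*
 * Tokens implement per-process references on the pf enable state:
 * DIOCSTARTREF hands the caller an opaque 64-bit token and bumps
 * pf_enabled_ref_count, DIOCSTOPREF returns the token and drops the
 * reference (pf is stopped once the count reaches zero), and DIOCSTOP
 * invalidates all outstanding tokens at once.  See pfioctl() below.
 */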
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK | M_ZERO);

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		printf("%s: unable to allocate pf token structure!", __func__);
		return (0);
	}

	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof (new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return (token_value);
}
static int
remove_token(struct pfioc_remove_token *tok)
{
	struct pfioc_kernel_token *entry, *tmp;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		if (tok->token_value == entry->token.token_value) {
			SLIST_REMOVE(&token_list_head, entry,
			    pfioc_kernel_token, next);
			_FREE(entry, M_TEMP);
			nr_tokens--;
			return (0);	/* success */
		}
	}

	printf("pf : remove failure\n");
	return (ESRCH);	/* failure */
}
static void
invalidate_all_tokens(void)
{
	struct pfioc_kernel_token *entry, *tmp;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
		_FREE(entry, M_TEMP);
	}

	nr_tokens = 0;
}
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
		state->sync_flags = PFSTATE_NOSYNC;
	}
	pf_purge_expired_states(pf_status.states);

	pfsync_clear_states(pf_status.hostid, NULL);

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
}
static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX) {
		return (ENXIO);
	}

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		if (pfdevcnt != 0) {
			lck_mtx_unlock(pf_lock);
			return (EBUSY);
		}
		pfdevcnt++;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}
static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX) {
		return (ENXIO);
	}

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		VERIFY(pfdevcnt > 0);
		pfdevcnt--;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}
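/*
 * Note that pfdevcnt makes the PFDEV_PFM minor effectively single-open:
 * pfopen() refuses a second concurrent open of /dev/pfm while the count
 * is nonzero, and pfclose() verifies and drops the count under pf_lock.
 */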
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule;
	int rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL) {
		return (NULL);
	}
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX) {
		return (NULL);
	}
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket) {
			return (NULL);
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		}
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket) {
			return (NULL);
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
		}
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return (NULL);
	}

	return (&rule->rpool);
}
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > TAGID_MAX) {
		return (0);
	}

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK | M_ZERO);
	if (tag == NULL) {
		return (0);
	}
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL) {	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return (new_tagid);
}
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname *tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname *p, *next;

	if (tag == 0) {
		return;
	}

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				_FREE(p, M_TEMP);
			}
			break;
		}
	}
}
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return (EINVAL);
	}
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return (EINVAL);
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket) {
		return (0);
	}
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
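/*
 * The PF_MD5_UPD* macros assume a local MD5_CTX *ctx in scope and fold
 * selected rule fields into a running digest; multi-byte fields are
 * staged through the htonl()/htons() temporary so they are hashed in
 * network byte order, keeping the resulting ruleset checksum comparable
 * across hosts (see pf_setup_pfsync_matching() below).
 */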
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;
	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return (EINVAL);
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return (EBUSY);
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return (error);
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	if (rs->rules[rs_num].inactive.ptr_array) {
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	}
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
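/*
 * Ruleset updates are transactional: pf_begin_rules() flushes the
 * inactive queue and hands back a ticket, the caller loads new rules
 * into the inactive queue under that ticket, and pf_commit_rules()
 * swaps the inactive and active queues in place before purging the old
 * list; pf_rollback_rules() simply discards the inactive queue.
 */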
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->label[sizeof (dst->label) - 1] = '\0';
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';

	dst->cuid = kauth_cred_getuid(p->p_ucred);
	dst->cpid = p->p_pid;

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}
}
static void
pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
}
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);

	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
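/*
 * pf_state_export() converts the kernel's absolute timestamps for the
 * wire format: creation becomes "seconds since created" and expire
 * becomes "seconds remaining", floored at zero.
 */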
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;

	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}

	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
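/*
 * pf_state_import() is the inverse mapping: counters are reset rather
 * than imported, rule pointers are re-bound locally (to pf_default_rule
 * until the real rule is known), and the absolute expiry is
 * reconstituted from the remaining-seconds value on the wire using the
 * default timeout table.
 */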
static void
pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->kif = NULL;
}

static void
pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->kif = NULL;
}
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		if (rs->rules[rs_cnt].inactive.ptr_array) {
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		}
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array) {
				return (ENOMEM);
			}
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
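/*
 * The digest computed above is the ruleset checksum exported through
 * pf_status.pf_chksum; pfsync peers can compare it to detect rulesets
 * that have diverged between hosts.
 */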
static void
pf_start(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}

static void
pf_stop(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return (EPERM);
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCSETSTATUSIF:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return (EPERM);
		}
		default:
			return (EPERM);
		}
	}

	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCGETSTARTERS:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETASTATS:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof (action));

			if (action == PF_GET_CLR_CNTR) {
				return (EACCES);
			}
			break;
		}
		default:
			return (EACCES);
		}
	}

	if (flags & FWRITE) {
		lck_rw_lock_exclusive(pf_perim_lock);
	} else {
		lck_rw_lock_shared(pf_perim_lock);
	}

	lck_mtx_lock(pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:		/* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, addr, sizeof (token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:		/* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof (pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof (pfrt));

			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {		/* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:		/* struct pfioc_rule */
	case DIOCGETRULES:		/* struct pfioc_rule */
	case DIOCGETRULE:		/* struct pfioc_rule */
	case DIOCCHANGERULE:		/* struct pfioc_rule */
	case DIOCINSERTRULE:		/* struct pfioc_rule */
	case DIOCDELETERULE: {		/* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:		/* struct pfioc_state_kill */
	case DIOCKILLSTATES: {		/* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:		/* struct pfioc_state */
	case DIOCGETSTATE: {		/* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {		/* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {		/* struct pf_status */
		struct pf_status *s = NULL;

		PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {		/* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof (pf_status.counters));
		bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
		bzero(pf_status.scounters, sizeof (pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {		/* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:		/* struct pfioc_tm */
	case DIOCGETTIMEOUT: {		/* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof (pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof (pt));
		break;
	}

	case DIOCGETLIMIT:		/* struct pfioc_limit */
	case DIOCSETLIMIT: {		/* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof (pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof (pl));
		break;
	}

	case DIOCSETDEBUG: {		/* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof (baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:		/* struct pfioc_pooladdr */
	case DIOCADDADDR:		/* struct pfioc_pooladdr */
	case DIOCGETADDRS:		/* struct pfioc_pooladdr */
	case DIOCGETADDR:		/* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:		/* struct pfioc_ruleset */
	case DIOCGETRULESET: {		/* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:		/* struct pfioc_table */
	case DIOCRADDTABLES:		/* struct pfioc_table */
	case DIOCRDELTABLES:		/* struct pfioc_table */
	case DIOCRGETTABLES:		/* struct pfioc_table */
	case DIOCRGETTSTATS:		/* struct pfioc_table */
	case DIOCRCLRTSTATS:		/* struct pfioc_table */
	case DIOCRSETTFLAGS:		/* struct pfioc_table */
	case DIOCRCLRADDRS:		/* struct pfioc_table */
	case DIOCRADDADDRS:		/* struct pfioc_table */
	case DIOCRDELADDRS:		/* struct pfioc_table */
	case DIOCRSETADDRS:		/* struct pfioc_table */
	case DIOCRGETADDRS:		/* struct pfioc_table */
	case DIOCRGETASTATS:		/* struct pfioc_table */
	case DIOCRCLRASTATS:		/* struct pfioc_table */
	case DIOCRTSTADDRS:		/* struct pfioc_table */
	case DIOCRINADEFINE: {		/* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:		/* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {		/* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:		/* struct pfioc_trans */
	case DIOCXROLLBACK:		/* struct pfioc_trans */
	case DIOCXCOMMIT: {		/* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {		/* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
		    error = ENOMEM; break;);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {	/* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {		/* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof (hid));
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:		/* struct pfioc_iface */
	case DIOCSETIFFLAG:		/* struct pfioc_iface */
	case DIOCCLRIFFLAG: {		/* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	return (error);
}
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64)
		goto struct32;

	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;

struct32:
	/*
	 * 32-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

done:
	return (error);
}
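/*
 * The 32-bit arm above mirrors the 64-bit arm line for line; only the
 * user-visible structure layout (pointer and size field width) differs,
 * which is why pfioctl() always passes in both views of the same buffer.
 */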
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof(struct pfioc_token) * nr_tokens;
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;	/* no more buffer space left */
			}

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		error = copyout(tokens, token_buf, ocnt - cnt);

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

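/*
 * Note on the case above: DIOCGETSTARTERS follows the probe-then-fetch
 * convention used throughout this file — a first call with size == 0
 * reports the bytes required, a second call with a buffer copies the
 * records out and rewrites size to the bytes actually returned. A hedged
 * userland sketch, assuming the user-visible struct mirrors the size and
 * pgt_buf fields consumed above:
 *
 *	struct pfioc_tokens pgt;
 *
 *	memset(&pgt, 0, sizeof(pgt));
 *	ioctl(dev, DIOCGETSTARTERS, &pgt);	// size == 0: probe only
 *	pgt.pgt_buf = malloc(pgt.size);		// sized by the probe
 *	ioctl(dev, DIOCGETSTARTERS, &pgt);	// fills buffer, updates size
 */
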
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
	struct pf_state *state;
	struct pf_src_node *sn;
	int killed = 0;

	/* expire the states */
	state = TAILQ_FIRST(&state_list);
	while (state) {
		if (state->rule.ptr == rule) {
			state->timeout = PFTM_PURGE;
		}
		state = TAILQ_NEXT(state, entry_list);
	}
	pf_purge_expired_states(pf_status.states);

	/* expire the src_nodes */
	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
		if (sn->rule.ptr != rule) {
			continue;
		}
		if (sn->states != 0) {
			RB_FOREACH(state, pf_state_tree_id,
			    &tree_id) {
				if (state->src_node == sn) {
					state->src_node = NULL;
				}
				if (state->nat_src_node == sn) {
					state->nat_src_node = NULL;
				}
			}
			sn->states = 0;
		}
		sn->expire = 1;
		killed++;
	}
	if (killed) {
		pf_purge_expired_src_nodes();
	}
}

static void
pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
    struct pf_rule *rule)
{
	struct pf_rule *r;
	u_int32_t nr = 0;

	pf_expire_states_and_src_nodes(rule);

	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
	if (ruleset->rules[rs_num].active.rcount-- == 0) {
		panic("%s: rcount value broken!", __func__);
	}

	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	while (r) {
		r->nr = nr++;
		r = TAILQ_NEXT(r, entries);
	}
}

static void
pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
{
	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
	ruleset->rules[rs].active.ticket =
	    ++ruleset->rules[rs].inactive.ticket;
}

/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset		ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. it's just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;

		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}

/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				/* ruleset is done; walk out of the anchor */
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}

static void
pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
    int rs, struct pf_rule **rule_ptr)
{
	struct pf_ruleset *ruleset = *ruleset_ptr;
	struct pf_rule *rule = *rule_ptr;

	/* step out of anchor */
	struct pf_ruleset *rs_copy = ruleset;
	ruleset = ruleset->anchor->parent ?
	    &ruleset->anchor->parent->ruleset : &pf_main_ruleset;

	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
	while (rule && (rule->anchor != rs_copy->anchor)) {
		rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL) {
		panic("%s: parent rule of anchor not found!", __func__);
	}
	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
		rule = TAILQ_NEXT(rule, entries);
	}

	*ruleset_ptr = ruleset;
	*rule_ptr = rule;
}

static void
pf_addrwrap_setup(struct pf_addr_wrap *aw)
{
	VERIFY(aw);
	bzero(&aw->p, sizeof aw->p);
}

static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr *apa;
	int error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET : rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}

static int
pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
{
	int error = 0;
	u_int32_t req_dev = 0;

	switch (cmd) {
	case DIOCADDRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule, *tail;
		int rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail) {
			rule->nr = tail->nr + 1;
		} else {
			rule->nr = 0;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}

		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}

#if DUMMYNET
		if (rule->action == PF_DUMMYNET) {
			struct dummynet_event dn_event;
			uint32_t direction = DN_INOUT;
			bzero(&dn_event, sizeof(dn_event));

			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;

			if (rule->direction == PF_IN) {
				direction = DN_IN;
			} else if (rule->direction == PF_OUT) {
				direction = DN_OUT;
			}

			dn_event.dn_event_rule_config.dir = direction;
			dn_event.dn_event_rule_config.af = rule->af;
			dn_event.dn_event_rule_config.proto = rule->proto;
			dn_event.dn_event_rule_config.src_port =
			    rule->src.xport.range.port[0];
			dn_event.dn_event_rule_config.dst_port =
			    rule->dst.xport.range.port[0];
			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
			    sizeof(dn_event.dn_event_rule_config.ifname));

			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
		}
#endif /* DUMMYNET */
		break;
	}

	case DIOCGETRULES: {
		struct pf_ruleset *ruleset;
		struct pf_rule *tail;
		int rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail) {
			pr->nr = tail->nr + 1;
		} else {
			pr->nr = 0;
		}
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule;
		int rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		pf_rule_copyout(rule, &pr->rule);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i) {
			if (rule->skip[i].ptr == NULL) {
				pr->rule.skip[i].nr = -1;
			} else {
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;
			}
		}

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule *pcr = pr;
		struct pf_ruleset *ruleset;
		struct pf_rule *oldrule = NULL, *newrule = NULL;
		struct pf_pooladdr *pa;
		u_int32_t nr = 0;
		int rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
#if !INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (newrule->af == AF_INET6) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else {
				newrule->kif = NULL;
			}

			if (newrule->tagname[0]) {
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0) {
					error = EBUSY;
				}
			}
			if (newrule->match_tagname[0]) {
				if ((newrule->match_tag = pf_tagname2tag(
				    newrule->match_tagname)) == 0) {
					error = EBUSY;
				}
			}
			if (newrule->rt && !newrule->direction) {
				error = EINVAL;
			}
			if (!newrule->log) {
				newrule->logif = 0;
			}
			if (newrule->logif >= PFLOGIFS_MAX) {
				error = EINVAL;
			}
			pf_addrwrap_setup(&newrule->src.addr);
			pf_addrwrap_setup(&newrule->dst.addr);
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr)) {
				error = EBUSY;
			}
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
				error = EINVAL;
			}
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
				error = EINVAL;
			}
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
				error = EINVAL;
			}

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
				    ruleset, newrule->overload_tblname)) ==
				    NULL) {
					error = EINVAL;
				} else {
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
				}
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
				error = EINVAL;
			}

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD) {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
			oldrule = TAILQ_LAST(
			    ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		} else {
			oldrule = TAILQ_FIRST(
			    ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
				oldrule = TAILQ_NEXT(oldrule, entries);
			}
			if (oldrule == NULL) {
				if (newrule != NULL) {
					pf_rm_rule(NULL, newrule);
				}
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL) {
				TAILQ_INSERT_TAIL(
				    ruleset->rules[rs_num].active.ptr,
				    newrule, entries);
			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			} else {
				TAILQ_INSERT_AFTER(
				    ruleset->rules[rs_num].active.ptr,
				    oldrule, newrule, entries);
			}
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
		oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCINSERTRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule, *tail, *r;
		int rs_num;
		int is_anchor;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		is_anchor = (pr->anchor_call[0] != '\0');

		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
		    pr->rule.owner, is_anchor, &error)) == NULL) {
			break;
		}

		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* make sure this anchor rule doesn't exist already */
		if (is_anchor) {
			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
			while (r) {
				if (r->anchor &&
				    ((strcmp(r->anchor->name,
				    pr->anchor_call)) == 0)) {
					if (((strcmp(pr->rule.owner,
					    r->owner)) == 0) ||
					    ((strcmp(r->owner, "")) == 0)) {
						error = EEXIST;
						break;
					}
				}
				r = TAILQ_NEXT(r, entries);
			}
			if (error == EEXIST) {
				break;
			}
		}

		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
			r = TAILQ_NEXT(r, entries);
		}
		if (r == NULL) {
			if ((tail =
			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue)) != NULL) {
				rule->nr = tail->nr + 1;
			} else {
				rule->nr = 0;
			}
		} else {
			rule->nr = r->nr;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		if (rule->anchor != NULL) {
			strlcpy(rule->anchor->owner, rule->owner,
			    PF_OWNER_NAME_SIZE);
		}

		if (r) {
			TAILQ_INSERT_BEFORE(r, rule, entries);
			while (r && ++r->nr) {
				r = TAILQ_NEXT(r, entries);
			}
		} else {
			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
			    rule, entries);
		}
		ruleset->rules[rs_num].active.rcount++;

		/* Calculate checksum for the main ruleset */
		if (ruleset == &pf_main_ruleset) {
			error = pf_setup_pfsync_matching(ruleset);
		}

		pf_ruleset_cleanup(ruleset, rs_num);
		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);

		pr->rule.ticket = rule->ticket;
		pf_rule_copyout(rule, &pr->rule);
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}
		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}
		break;
	}

	case DIOCDELETERULE: {
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* get device through which request is made */
		if ((uint8_t)minordev == PFDEV_PFM) {
			req_dev |= PFRULE_PFM;
		}

		if (pr->rule.ticket) {
			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
				break;
			}
		} else {
			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
		}
		if (pr->rule.action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, -1);
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

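/*
 * Enumeration contract for DIOCGETRULES/DIOCGETRULE above, as driven by
 * pfctl-style clients: DIOCGETRULES snapshots the active ticket and the
 * rule count in pr.nr; each DIOCGETRULE then fetches one rule by index
 * under that ticket. Hedged sketch, error handling elided:
 *
 *	struct pfioc_rule pr;
 *	u_int32_t nr, mnr;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.rule.action = PF_PASS;		// selects the filter ruleset
 *	ioctl(dev, DIOCGETRULES, &pr);		// sets pr.ticket and pr.nr
 *	mnr = pr.nr;
 *	for (nr = 0; nr < mnr; nr++) {
 *		pr.nr = nr;
 *		ioctl(dev, DIOCGETRULE, &pr);	// fills pr.rule
 *	}
 */
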
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * nor owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {		/* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		int nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}

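/*
 * DIOCGETSTATES above uses the same probe-then-fetch convention: with
 * ps_len == 0 the kernel only reports the space needed for every state;
 * with a buffer it exports up to ps_len bytes of pfsync_state records
 * and rewrites ps_len to the bytes used. Hedged userland sketch:
 *
 *	struct pfioc_states ps;
 *	size_t n;
 *
 *	memset(&ps, 0, sizeof(ps));
 *	ioctl(dev, DIOCGETSTATES, &ps);		// probe: sets ps.ps_len
 *	ps.ps_buf = malloc(ps.ps_len);
 *	ioctl(dev, DIOCGETSTATES, &ps);		// copies out the states
 *	n = ps.ps_len / sizeof(struct pfsync_state);
 */
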
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;	/* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

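/*
 * The reverse lookup above is what lets transparent proxies recover the
 * original or translated endpoints of a NATed connection. A hedged
 * sketch for an established outbound IPv4 TCP flow (ports and addresses
 * in network byte order, error handling elided):
 *
 *	struct pfioc_natlook pnl;
 *
 *	memset(&pnl, 0, sizeof(pnl));
 *	pnl.direction = PF_OUT;
 *	pnl.af = AF_INET;
 *	pnl.proto = IPPROTO_TCP;
 *	pnl.saddr.v4 = client_addr;		// struct in_addr
 *	pnl.sxport.port = client_port;
 *	pnl.daddr.v4 = server_addr;
 *	pnl.dxport.port = server_port;
 *	ioctl(dev, DIOCNATLOOK, &pnl);
 *	// pnl.rdaddr/pnl.rdxport: destination after translation
 */
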
static int
pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCSETTIMEOUT: {
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			break;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
			pt->seconds = 1;
		}
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
			wakeup(pf_purge_thread_fn);
		}
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {
		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

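/*
 * DIOCSETTIMEOUT above swaps values: the previous setting comes back in
 * pt.seconds, so a caller can save and later restore a timeout. Sketch:
 *
 *	struct pfioc_tm pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.timeout = PFTM_TCP_ESTABLISHED;
 *	pt.seconds = 3600;			// new value
 *	ioctl(dev, DIOCSETTIMEOUT, &pt);	// pt.seconds = old value
 */
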
static int
pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCGETLIMIT: {
		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			break;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {
		int old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			break;
		}
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

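/*
 * DIOCSETLIMIT has the same old-value swap as DIOCSETTIMEOUT. Sketch
 * raising the hard limit on the state table:
 *
 *	struct pfioc_limit pl;
 *
 *	memset(&pl, 0, sizeof(pl));
 *	pl.index = PF_LIMIT_STATES;
 *	pl.limit = 20000;			// new hard limit
 *	ioctl(dev, DIOCSETLIMIT, &pl);		// pl.limit = old limit
 */
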
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		}
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

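/*
 * Anchor walking against the two cases above, pfctl-style: first
 * DIOCGETRULESETS to count the children of a path, then one
 * DIOCGETRULESET per index to read each child anchor's name. Sketch:
 *
 *	struct pfioc_ruleset prs;
 *	u_int32_t nr, mnr;
 *
 *	memset(&prs, 0, sizeof(prs));
 *	strlcpy(prs.path, "", sizeof(prs.path));	// main ruleset
 *	ioctl(dev, DIOCGETRULESETS, &prs);		// prs.nr = count
 *	mnr = prs.nr;
 *	for (nr = 0; nr < mnr; nr++) {
 *		prs.nr = nr;
 *		ioctl(dev, DIOCGETRULESET, &prs);	// fills prs.name
 *	}
 */
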
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;
	int i;

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, buf, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}

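/*
 * The three cases above implement pf's two-phase ruleset swap. A loader
 * passes one pfioc_trans_e per ruleset it wants to replace: DIOCXBEGIN
 * opens an inactive copy of each and hands back a ticket per element,
 * rules are then staged against those tickets, and DIOCXCOMMIT (or
 * DIOCXROLLBACK) atomically installs (or discards) the whole set.
 * Hedged sketch for a single filter ruleset:
 *
 *	struct pfioc_trans trans;
 *	struct pfioc_trans_e e[1];
 *
 *	memset(&trans, 0, sizeof(trans));
 *	memset(e, 0, sizeof(e));
 *	e[0].rs_num = PF_RULESET_FILTER;
 *	trans.size = 1;				// elements in array
 *	trans.esize = sizeof(e[0]);		// checked against *ioe
 *	trans.array = e;
 *	ioctl(dev, DIOCXBEGIN, &trans);		// e[0].ticket now valid
 *	// ... stage rules into the inactive set under e[0].ticket ...
 *	ioctl(dev, DIOCXCOMMIT, &trans);	// or DIOCXROLLBACK
 */
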
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}

static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn) {
							s->src_node = NULL;
						}
						if (s->nat_src_node == sn) {
							s->nat_src_node = NULL;
						}
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0) {
			pf_purge_expired_src_nodes();
		}

		psnk->psnk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_set_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_clear_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}

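/*
 * DIOCIGETIFACES above enumerates interfaces; note the esize check —
 * the kernel insists on the size of the exported pfi_uif, not its
 * internal pfi_kif. Hedged sketch with a fixed illustrative buffer:
 *
 *	struct pfioc_iface io;
 *	struct pfi_uif buf[64];			// illustrative capacity
 *
 *	memset(&io, 0, sizeof(io));
 *	io.pfiio_name[0] = '\0';		// empty name: all interfaces
 *	io.pfiio_buffer = buf;
 *	io.pfiio_esize = sizeof(buf[0]);
 *	io.pfiio_size = sizeof(buf) / sizeof(buf[0]);
 *	ioctl(dev, DIOCIGETIFACES, &io);	// io.pfiio_size = returned count
 */
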
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(pf_lock);
	}
done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}

static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}

static int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = ENOBUFS;
		}
	}
	return error;
}

void
pf_ifaddr_hook(struct ifnet *ifp)
{
	struct pfi_kif *kif = ifp->if_pf_kif;

	if (kif != NULL) {
		lck_rw_lock_shared(pf_perim_lock);
		lck_mtx_lock(pf_lock);

		pfi_kifaddr_update(kif);

		lck_mtx_unlock(pf_lock);
		lck_rw_done(pf_perim_lock);
	}
}

/*
 * Caller acquires dlil lock as writer (exclusive)
 */
void
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	if (attach) {
		pfi_attach_ifnet(ifp);
	} else {
		pfi_detach_ifnet(ifp);
	}
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
}

void
pf_attach_hooks(void)
{
	ifnet_head_lock_shared();
	/*
	 * Check against ifnet_addrs[] before proceeding, in case this
	 * is called very early on, e.g. during dlil_init() before any
	 * network interface is attached.
	 */
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];

			if (ifp != NULL) {
				pfi_attach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}

/* currently unused along with pfdetach() */
void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];

			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}

/*
 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique. This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failures at compile time indicate duplicated ioctl values.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES: