2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
84 #include <mach/vm_param.h>
88 #include <net/if_types.h>
89 #include <net/net_api_stats.h>
90 #include <net/route.h>
92 #include <netinet/in.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/ip_var.h>
97 #include <netinet/ip_icmp.h>
98 #include <netinet/if_ether.h>
101 #include <netinet/ip_dummynet.h>
104 #endif /* DUMMYNET */
106 #include <libkern/crypto/md5.h>
108 #include <machine/machine_routines.h>
110 #include <miscfs/devfs/devfs.h>
112 #include <net/pfvar.h>
115 #include <net/if_pfsync.h>
119 #include <net/if_pflog.h>
123 #include <netinet/ip6.h>
124 #include <netinet/in_pcb.h>
127 #include <dev/random/randomdev.h>
130 static void pfdetach(void);
132 static int pfopen(dev_t
, int, int, struct proc
*);
133 static int pfclose(dev_t
, int, int, struct proc
*);
134 static int pfioctl(dev_t
, u_long
, caddr_t
, int, struct proc
*);
135 static int pfioctl_ioc_table(u_long
, struct pfioc_table_32
*,
136 struct pfioc_table_64
*, struct proc
*);
137 static int pfioctl_ioc_tokens(u_long
, struct pfioc_tokens_32
*,
138 struct pfioc_tokens_64
*, struct proc
*);
139 static int pfioctl_ioc_rule(u_long
, int, struct pfioc_rule
*, struct proc
*);
140 static int pfioctl_ioc_state_kill(u_long
, struct pfioc_state_kill
*,
142 static int pfioctl_ioc_state(u_long
, struct pfioc_state
*, struct proc
*);
143 static int pfioctl_ioc_states(u_long
, struct pfioc_states_32
*,
144 struct pfioc_states_64
*, struct proc
*);
145 static int pfioctl_ioc_natlook(u_long
, struct pfioc_natlook
*, struct proc
*);
146 static int pfioctl_ioc_tm(u_long
, struct pfioc_tm
*, struct proc
*);
147 static int pfioctl_ioc_limit(u_long
, struct pfioc_limit
*, struct proc
*);
148 static int pfioctl_ioc_pooladdr(u_long
, struct pfioc_pooladdr
*, struct proc
*);
149 static int pfioctl_ioc_ruleset(u_long
, struct pfioc_ruleset
*, struct proc
*);
150 static int pfioctl_ioc_trans(u_long
, struct pfioc_trans_32
*,
151 struct pfioc_trans_64
*, struct proc
*);
152 static int pfioctl_ioc_src_nodes(u_long
, struct pfioc_src_nodes_32
*,
153 struct pfioc_src_nodes_64
*, struct proc
*);
154 static int pfioctl_ioc_src_node_kill(u_long
, struct pfioc_src_node_kill
*,
156 static int pfioctl_ioc_iface(u_long
, struct pfioc_iface_32
*,
157 struct pfioc_iface_64
*, struct proc
*);
158 static struct pf_pool
*pf_get_pool(char *, u_int32_t
, u_int8_t
, u_int32_t
,
159 u_int8_t
, u_int8_t
, u_int8_t
);
160 static void pf_mv_pool(struct pf_palist
*, struct pf_palist
*);
161 static void pf_empty_pool(struct pf_palist
*);
162 static int pf_begin_rules(u_int32_t
*, int, const char *);
163 static int pf_rollback_rules(u_int32_t
, int, char *);
164 static int pf_setup_pfsync_matching(struct pf_ruleset
*);
165 static void pf_hash_rule(MD5_CTX
*, struct pf_rule
*);
166 static void pf_hash_rule_addr(MD5_CTX
*, struct pf_rule_addr
*, u_int8_t
);
167 static int pf_commit_rules(u_int32_t
, int, char *);
168 static void pf_rule_copyin(struct pf_rule
*, struct pf_rule
*, struct proc
*,
170 static void pf_rule_copyout(struct pf_rule
*, struct pf_rule
*);
171 static void pf_state_export(struct pfsync_state
*, struct pf_state_key
*,
173 static void pf_state_import(struct pfsync_state
*, struct pf_state_key
*,
175 static void pf_pooladdr_copyin(struct pf_pooladdr
*, struct pf_pooladdr
*);
176 static void pf_pooladdr_copyout(struct pf_pooladdr
*, struct pf_pooladdr
*);
177 static void pf_expire_states_and_src_nodes(struct pf_rule
*);
178 static void pf_delete_rule_from_ruleset(struct pf_ruleset
*,
179 int, struct pf_rule
*);
180 static void pf_addrwrap_setup(struct pf_addr_wrap
*);
181 static int pf_rule_setup(struct pfioc_rule
*, struct pf_rule
*,
182 struct pf_ruleset
*);
183 static void pf_delete_rule_by_owner(char *, u_int32_t
);
184 static int pf_delete_rule_by_ticket(struct pfioc_rule
*, u_int32_t
);
185 static void pf_ruleset_cleanup(struct pf_ruleset
*, int);
186 static void pf_deleterule_anchor_step_out(struct pf_ruleset
**,
187 int, struct pf_rule
**);
189 #define PF_CDEV_MAJOR (-1)
191 static struct cdevsw pf_cdevsw
= {
194 /* read */ eno_rdwrt
,
195 /* write */ eno_rdwrt
,
198 /* reset */ eno_reset
,
200 /* select */ eno_select
,
202 /* strategy */ eno_strat
,
208 static void pf_attach_hooks(void);
210 /* currently unused along with pfdetach() */
211 static void pf_detach_hooks(void);
215 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
216 * and used in pf_af_hook() for performance optimization, such that packets
217 * will enter pf_test() or pf_test6() only when PF is running.
219 int pf_is_enabled
= 0;
221 u_int32_t pf_hash_seed
;
222 int16_t pf_nat64_configured
= 0;
225 * These are the pf enabled reference counting variables
227 static u_int64_t pf_enabled_ref_count
;
228 static u_int32_t nr_tokens
= 0;
229 static u_int64_t pffwrules
;
230 static u_int32_t pfdevcnt
;
232 SLIST_HEAD(list_head
, pfioc_kernel_token
);
233 static struct list_head token_list_head
;
235 struct pf_rule pf_default_rule
;
237 #define TAGID_MAX 50000
238 static TAILQ_HEAD(pf_tags
, pf_tagname
) pf_tags
=
239 TAILQ_HEAD_INITIALIZER(pf_tags
);
241 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
242 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
244 static u_int16_t
tagname2tag(struct pf_tags
*, char *);
245 static void tag2tagname(struct pf_tags
*, u_int16_t
, char *);
246 static void tag_unref(struct pf_tags
*, u_int16_t
);
247 static int pf_rtlabel_add(struct pf_addr_wrap
*);
248 static void pf_rtlabel_remove(struct pf_addr_wrap
*);
249 static void pf_rtlabel_copyout(struct pf_addr_wrap
*);
252 static int pf_inet_hook(struct ifnet
*, struct mbuf
**, int,
253 struct ip_fw_args
*);
256 static int pf_inet6_hook(struct ifnet
*, struct mbuf
**, int,
257 struct ip_fw_args
*);
260 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
263 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
265 #define PFIOCX_STRUCT_DECL(s) \
268 struct s##_32 _s##_32; \
269 struct s##_64 _s##_64; \
273 #define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
274 VERIFY(s##_un == NULL); \
275 s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
276 if (s##_un == NULL) { \
280 bcopy(a, &s##_un->_u._s##_64, \
281 sizeof (struct s##_64)); \
283 bcopy(a, &s##_un->_u._s##_32, \
284 sizeof (struct s##_32)); \
288 #define PFIOCX_STRUCT_END(s, a) { \
289 VERIFY(s##_un != NULL); \
291 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
293 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
294 _FREE(s##_un, M_TEMP); \
298 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
299 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
302 * Helper macros for regular ioctl structures.
304 #define PFIOC_STRUCT_BEGIN(a, v, _action) { \
305 VERIFY((v) == NULL); \
306 (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
310 bcopy(a, v, sizeof (*(v))); \
314 #define PFIOC_STRUCT_END(v, a) { \
315 VERIFY((v) != NULL); \
316 bcopy(v, a, sizeof (*(v))); \
321 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
322 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
324 static lck_attr_t
*pf_perim_lock_attr
;
325 static lck_grp_t
*pf_perim_lock_grp
;
326 static lck_grp_attr_t
*pf_perim_lock_grp_attr
;
328 static lck_attr_t
*pf_lock_attr
;
329 static lck_grp_t
*pf_lock_grp
;
330 static lck_grp_attr_t
*pf_lock_grp_attr
;
332 struct thread
*pf_purge_thread
;
334 extern void pfi_kifaddr_update(void *);
336 /* pf enable ref-counting helper functions */
337 static u_int64_t
generate_token(struct proc
*);
338 static int remove_token(struct pfioc_remove_token
*);
339 static void invalidate_all_tokens(void);
342 generate_token(struct proc
*p
)
344 u_int64_t token_value
;
345 struct pfioc_kernel_token
*new_token
;
347 new_token
= _MALLOC(sizeof (struct pfioc_kernel_token
), M_TEMP
,
350 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
352 if (new_token
== NULL
) {
353 /* malloc failed! bail! */
354 printf("%s: unable to allocate pf token structure!", __func__
);
358 token_value
= VM_KERNEL_ADDRPERM((u_int64_t
)(uintptr_t)new_token
);
360 new_token
->token
.token_value
= token_value
;
361 new_token
->token
.pid
= proc_pid(p
);
362 proc_name(new_token
->token
.pid
, new_token
->token
.proc_name
,
363 sizeof (new_token
->token
.proc_name
));
364 new_token
->token
.timestamp
= pf_calendar_time_second();
366 SLIST_INSERT_HEAD(&token_list_head
, new_token
, next
);
369 return (token_value
);
373 remove_token(struct pfioc_remove_token
*tok
)
375 struct pfioc_kernel_token
*entry
, *tmp
;
377 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
379 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
380 if (tok
->token_value
== entry
->token
.token_value
) {
381 SLIST_REMOVE(&token_list_head
, entry
,
382 pfioc_kernel_token
, next
);
383 _FREE(entry
, M_TEMP
);
385 return (0); /* success */
389 printf("pf : remove failure\n");
390 return (ESRCH
); /* failure */
394 invalidate_all_tokens(void)
396 struct pfioc_kernel_token
*entry
, *tmp
;
398 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
400 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
401 SLIST_REMOVE(&token_list_head
, entry
, pfioc_kernel_token
, next
);
402 _FREE(entry
, M_TEMP
);
411 u_int32_t
*t
= pf_default_rule
.timeout
;
414 pf_perim_lock_grp_attr
= lck_grp_attr_alloc_init();
415 pf_perim_lock_grp
= lck_grp_alloc_init("pf_perim",
416 pf_perim_lock_grp_attr
);
417 pf_perim_lock_attr
= lck_attr_alloc_init();
418 lck_rw_init(pf_perim_lock
, pf_perim_lock_grp
, pf_perim_lock_attr
);
420 pf_lock_grp_attr
= lck_grp_attr_alloc_init();
421 pf_lock_grp
= lck_grp_alloc_init("pf", pf_lock_grp_attr
);
422 pf_lock_attr
= lck_attr_alloc_init();
423 lck_mtx_init(pf_lock
, pf_lock_grp
, pf_lock_attr
);
425 pool_init(&pf_rule_pl
, sizeof (struct pf_rule
), 0, 0, 0, "pfrulepl",
427 pool_init(&pf_src_tree_pl
, sizeof (struct pf_src_node
), 0, 0, 0,
429 pool_init(&pf_state_pl
, sizeof (struct pf_state
), 0, 0, 0, "pfstatepl",
431 pool_init(&pf_state_key_pl
, sizeof (struct pf_state_key
), 0, 0, 0,
432 "pfstatekeypl", NULL
);
433 pool_init(&pf_app_state_pl
, sizeof (struct pf_app_state
), 0, 0, 0,
434 "pfappstatepl", NULL
);
435 pool_init(&pf_pooladdr_pl
, sizeof (struct pf_pooladdr
), 0, 0, 0,
436 "pfpooladdrpl", NULL
);
439 pf_osfp_initialize();
441 pool_sethardlimit(pf_pool_limits
[PF_LIMIT_STATES
].pp
,
442 pf_pool_limits
[PF_LIMIT_STATES
].limit
, NULL
, 0);
444 if (max_mem
<= 256*1024*1024)
445 pf_pool_limits
[PF_LIMIT_TABLE_ENTRIES
].limit
=
446 PFR_KENTRY_HIWAT_SMALL
;
448 RB_INIT(&tree_src_tracking
);
449 RB_INIT(&pf_anchors
);
450 pf_init_ruleset(&pf_main_ruleset
);
451 TAILQ_INIT(&pf_pabuf
);
452 TAILQ_INIT(&state_list
);
454 _CASSERT((SC_BE
& SCIDX_MASK
) == SCIDX_BE
);
455 _CASSERT((SC_BK_SYS
& SCIDX_MASK
) == SCIDX_BK_SYS
);
456 _CASSERT((SC_BK
& SCIDX_MASK
) == SCIDX_BK
);
457 _CASSERT((SC_RD
& SCIDX_MASK
) == SCIDX_RD
);
458 _CASSERT((SC_OAM
& SCIDX_MASK
) == SCIDX_OAM
);
459 _CASSERT((SC_AV
& SCIDX_MASK
) == SCIDX_AV
);
460 _CASSERT((SC_RV
& SCIDX_MASK
) == SCIDX_RV
);
461 _CASSERT((SC_VI
& SCIDX_MASK
) == SCIDX_VI
);
462 _CASSERT((SC_VO
& SCIDX_MASK
) == SCIDX_VO
);
463 _CASSERT((SC_CTL
& SCIDX_MASK
) == SCIDX_CTL
);
465 /* default rule should never be garbage collected */
466 pf_default_rule
.entries
.tqe_prev
= &pf_default_rule
.entries
.tqe_next
;
467 pf_default_rule
.action
= PF_PASS
;
468 pf_default_rule
.nr
= -1;
469 pf_default_rule
.rtableid
= IFSCOPE_NONE
;
471 /* initialize default timeouts */
472 t
[PFTM_TCP_FIRST_PACKET
] = PFTM_TCP_FIRST_PACKET_VAL
;
473 t
[PFTM_TCP_OPENING
] = PFTM_TCP_OPENING_VAL
;
474 t
[PFTM_TCP_ESTABLISHED
] = PFTM_TCP_ESTABLISHED_VAL
;
475 t
[PFTM_TCP_CLOSING
] = PFTM_TCP_CLOSING_VAL
;
476 t
[PFTM_TCP_FIN_WAIT
] = PFTM_TCP_FIN_WAIT_VAL
;
477 t
[PFTM_TCP_CLOSED
] = PFTM_TCP_CLOSED_VAL
;
478 t
[PFTM_UDP_FIRST_PACKET
] = PFTM_UDP_FIRST_PACKET_VAL
;
479 t
[PFTM_UDP_SINGLE
] = PFTM_UDP_SINGLE_VAL
;
480 t
[PFTM_UDP_MULTIPLE
] = PFTM_UDP_MULTIPLE_VAL
;
481 t
[PFTM_ICMP_FIRST_PACKET
] = PFTM_ICMP_FIRST_PACKET_VAL
;
482 t
[PFTM_ICMP_ERROR_REPLY
] = PFTM_ICMP_ERROR_REPLY_VAL
;
483 t
[PFTM_GREv1_FIRST_PACKET
] = PFTM_GREv1_FIRST_PACKET_VAL
;
484 t
[PFTM_GREv1_INITIATING
] = PFTM_GREv1_INITIATING_VAL
;
485 t
[PFTM_GREv1_ESTABLISHED
] = PFTM_GREv1_ESTABLISHED_VAL
;
486 t
[PFTM_ESP_FIRST_PACKET
] = PFTM_ESP_FIRST_PACKET_VAL
;
487 t
[PFTM_ESP_INITIATING
] = PFTM_ESP_INITIATING_VAL
;
488 t
[PFTM_ESP_ESTABLISHED
] = PFTM_ESP_ESTABLISHED_VAL
;
489 t
[PFTM_OTHER_FIRST_PACKET
] = PFTM_OTHER_FIRST_PACKET_VAL
;
490 t
[PFTM_OTHER_SINGLE
] = PFTM_OTHER_SINGLE_VAL
;
491 t
[PFTM_OTHER_MULTIPLE
] = PFTM_OTHER_MULTIPLE_VAL
;
492 t
[PFTM_FRAG
] = PFTM_FRAG_VAL
;
493 t
[PFTM_INTERVAL
] = PFTM_INTERVAL_VAL
;
494 t
[PFTM_SRC_NODE
] = PFTM_SRC_NODE_VAL
;
495 t
[PFTM_TS_DIFF
] = PFTM_TS_DIFF_VAL
;
496 t
[PFTM_ADAPTIVE_START
] = PFSTATE_ADAPT_START
;
497 t
[PFTM_ADAPTIVE_END
] = PFSTATE_ADAPT_END
;
500 bzero(&pf_status
, sizeof (pf_status
));
501 pf_status
.debug
= PF_DEBUG_URGENT
;
502 pf_hash_seed
= RandomULong();
504 /* XXX do our best to avoid a conflict */
505 pf_status
.hostid
= random();
507 if (kernel_thread_start(pf_purge_thread_fn
, NULL
,
508 &pf_purge_thread
) != 0) {
509 printf("%s: unable to start purge thread!", __func__
);
513 maj
= cdevsw_add(PF_CDEV_MAJOR
, &pf_cdevsw
);
515 printf("%s: failed to allocate major number!\n", __func__
);
518 (void) devfs_make_node(makedev(maj
, PFDEV_PF
), DEVFS_CHAR
,
519 UID_ROOT
, GID_WHEEL
, 0600, "pf", 0);
521 (void) devfs_make_node(makedev(maj
, PFDEV_PFM
), DEVFS_CHAR
,
522 UID_ROOT
, GID_WHEEL
, 0600, "pfm", 0);
534 struct pf_anchor
*anchor
;
535 struct pf_state
*state
;
536 struct pf_src_node
*node
;
537 struct pfioc_table pt
;
544 pf_status
.running
= 0;
545 wakeup(pf_purge_thread_fn
);
547 /* clear the rulesets */
548 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
549 if (pf_begin_rules(&ticket
, i
, &r
) == 0)
550 pf_commit_rules(ticket
, i
, &r
);
553 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
554 state
->timeout
= PFTM_PURGE
;
556 state
->sync_flags
= PFSTATE_NOSYNC
;
559 pf_purge_expired_states(pf_status
.states
);
562 pfsync_clear_states(pf_status
.hostid
, NULL
);
565 /* clear source nodes */
566 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
567 state
->src_node
= NULL
;
568 state
->nat_src_node
= NULL
;
570 RB_FOREACH(node
, pf_src_tree
, &tree_src_tracking
) {
574 pf_purge_expired_src_nodes();
577 memset(&pt
, '\0', sizeof (pt
));
578 pfr_clr_tables(&pt
.pfrio_table
, &pt
.pfrio_ndel
, pt
.pfrio_flags
);
580 /* destroy anchors */
581 while ((anchor
= RB_MIN(pf_anchor_global
, &pf_anchors
)) != NULL
) {
582 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
583 if (pf_begin_rules(&ticket
, i
, anchor
->name
) == 0)
584 pf_commit_rules(ticket
, i
, anchor
->name
);
587 /* destroy main ruleset */
588 pf_remove_if_empty_ruleset(&pf_main_ruleset
);
590 /* destroy the pools */
591 pool_destroy(&pf_pooladdr_pl
);
592 pool_destroy(&pf_state_pl
);
593 pool_destroy(&pf_rule_pl
);
594 pool_destroy(&pf_src_tree_pl
);
596 /* destroy subsystems */
597 pf_normalize_destroy();
605 pfopen(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
607 #pragma unused(flags, fmt, p)
608 if (minor(dev
) >= PFDEV_MAX
)
611 if (minor(dev
) == PFDEV_PFM
) {
612 lck_mtx_lock(pf_lock
);
614 lck_mtx_unlock(pf_lock
);
618 lck_mtx_unlock(pf_lock
);
624 pfclose(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
626 #pragma unused(flags, fmt, p)
627 if (minor(dev
) >= PFDEV_MAX
)
630 if (minor(dev
) == PFDEV_PFM
) {
631 lck_mtx_lock(pf_lock
);
632 VERIFY(pfdevcnt
> 0);
634 lck_mtx_unlock(pf_lock
);
639 static struct pf_pool
*
640 pf_get_pool(char *anchor
, u_int32_t ticket
, u_int8_t rule_action
,
641 u_int32_t rule_number
, u_int8_t r_last
, u_int8_t active
,
642 u_int8_t check_ticket
)
644 struct pf_ruleset
*ruleset
;
645 struct pf_rule
*rule
;
648 ruleset
= pf_find_ruleset(anchor
);
651 rs_num
= pf_get_ruleset_number(rule_action
);
652 if (rs_num
>= PF_RULESET_MAX
)
655 if (check_ticket
&& ticket
!=
656 ruleset
->rules
[rs_num
].active
.ticket
)
659 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
662 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
664 if (check_ticket
&& ticket
!=
665 ruleset
->rules
[rs_num
].inactive
.ticket
)
668 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
671 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].inactive
.ptr
);
674 while ((rule
!= NULL
) && (rule
->nr
!= rule_number
))
675 rule
= TAILQ_NEXT(rule
, entries
);
680 return (&rule
->rpool
);
684 pf_mv_pool(struct pf_palist
*poola
, struct pf_palist
*poolb
)
686 struct pf_pooladdr
*mv_pool_pa
;
688 while ((mv_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
689 TAILQ_REMOVE(poola
, mv_pool_pa
, entries
);
690 TAILQ_INSERT_TAIL(poolb
, mv_pool_pa
, entries
);
695 pf_empty_pool(struct pf_palist
*poola
)
697 struct pf_pooladdr
*empty_pool_pa
;
699 while ((empty_pool_pa
= TAILQ_FIRST(poola
)) != NULL
) {
700 pfi_dynaddr_remove(&empty_pool_pa
->addr
);
701 pf_tbladdr_remove(&empty_pool_pa
->addr
);
702 pfi_kif_unref(empty_pool_pa
->kif
, PFI_KIF_REF_RULE
);
703 TAILQ_REMOVE(poola
, empty_pool_pa
, entries
);
704 pool_put(&pf_pooladdr_pl
, empty_pool_pa
);
709 pf_rm_rule(struct pf_rulequeue
*rulequeue
, struct pf_rule
*rule
)
711 if (rulequeue
!= NULL
) {
712 if (rule
->states
<= 0) {
714 * XXX - we need to remove the table *before* detaching
715 * the rule to make sure the table code does not delete
716 * the anchor under our feet.
718 pf_tbladdr_remove(&rule
->src
.addr
);
719 pf_tbladdr_remove(&rule
->dst
.addr
);
720 if (rule
->overload_tbl
)
721 pfr_detach_table(rule
->overload_tbl
);
723 TAILQ_REMOVE(rulequeue
, rule
, entries
);
724 rule
->entries
.tqe_prev
= NULL
;
728 if (rule
->states
> 0 || rule
->src_nodes
> 0 ||
729 rule
->entries
.tqe_prev
!= NULL
)
731 pf_tag_unref(rule
->tag
);
732 pf_tag_unref(rule
->match_tag
);
733 pf_rtlabel_remove(&rule
->src
.addr
);
734 pf_rtlabel_remove(&rule
->dst
.addr
);
735 pfi_dynaddr_remove(&rule
->src
.addr
);
736 pfi_dynaddr_remove(&rule
->dst
.addr
);
737 if (rulequeue
== NULL
) {
738 pf_tbladdr_remove(&rule
->src
.addr
);
739 pf_tbladdr_remove(&rule
->dst
.addr
);
740 if (rule
->overload_tbl
)
741 pfr_detach_table(rule
->overload_tbl
);
743 pfi_kif_unref(rule
->kif
, PFI_KIF_REF_RULE
);
744 pf_anchor_remove(rule
);
745 pf_empty_pool(&rule
->rpool
.list
);
746 pool_put(&pf_rule_pl
, rule
);
750 tagname2tag(struct pf_tags
*head
, char *tagname
)
752 struct pf_tagname
*tag
, *p
= NULL
;
753 u_int16_t new_tagid
= 1;
755 TAILQ_FOREACH(tag
, head
, entries
)
756 if (strcmp(tagname
, tag
->name
) == 0) {
762 * to avoid fragmentation, we do a linear search from the beginning
763 * and take the first free slot we find. if there is none or the list
764 * is empty, append a new entry at the end.
768 if (!TAILQ_EMPTY(head
))
769 for (p
= TAILQ_FIRST(head
); p
!= NULL
&&
770 p
->tag
== new_tagid
; p
= TAILQ_NEXT(p
, entries
))
771 new_tagid
= p
->tag
+ 1;
773 if (new_tagid
> TAGID_MAX
)
776 /* allocate and fill new struct pf_tagname */
777 tag
= _MALLOC(sizeof (*tag
), M_TEMP
, M_WAITOK
|M_ZERO
);
780 strlcpy(tag
->name
, tagname
, sizeof (tag
->name
));
781 tag
->tag
= new_tagid
;
784 if (p
!= NULL
) /* insert new entry before p */
785 TAILQ_INSERT_BEFORE(p
, tag
, entries
);
786 else /* either list empty or no free slot in between */
787 TAILQ_INSERT_TAIL(head
, tag
, entries
);
793 tag2tagname(struct pf_tags
*head
, u_int16_t tagid
, char *p
)
795 struct pf_tagname
*tag
;
797 TAILQ_FOREACH(tag
, head
, entries
)
798 if (tag
->tag
== tagid
) {
799 strlcpy(p
, tag
->name
, PF_TAG_NAME_SIZE
);
805 tag_unref(struct pf_tags
*head
, u_int16_t tag
)
807 struct pf_tagname
*p
, *next
;
812 for (p
= TAILQ_FIRST(head
); p
!= NULL
; p
= next
) {
813 next
= TAILQ_NEXT(p
, entries
);
816 TAILQ_REMOVE(head
, p
, entries
);
825 pf_tagname2tag(char *tagname
)
827 return (tagname2tag(&pf_tags
, tagname
));
831 pf_tag2tagname(u_int16_t tagid
, char *p
)
833 tag2tagname(&pf_tags
, tagid
, p
);
837 pf_tag_ref(u_int16_t tag
)
839 struct pf_tagname
*t
;
841 TAILQ_FOREACH(t
, &pf_tags
, entries
)
849 pf_tag_unref(u_int16_t tag
)
851 tag_unref(&pf_tags
, tag
);
855 pf_rtlabel_add(struct pf_addr_wrap
*a
)
862 pf_rtlabel_remove(struct pf_addr_wrap
*a
)
868 pf_rtlabel_copyout(struct pf_addr_wrap
*a
)
874 pf_begin_rules(u_int32_t
*ticket
, int rs_num
, const char *anchor
)
876 struct pf_ruleset
*rs
;
877 struct pf_rule
*rule
;
879 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
881 rs
= pf_find_or_create_ruleset(anchor
);
884 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
885 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
886 rs
->rules
[rs_num
].inactive
.rcount
--;
888 *ticket
= ++rs
->rules
[rs_num
].inactive
.ticket
;
889 rs
->rules
[rs_num
].inactive
.open
= 1;
894 pf_rollback_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
896 struct pf_ruleset
*rs
;
897 struct pf_rule
*rule
;
899 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
901 rs
= pf_find_ruleset(anchor
);
902 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
903 rs
->rules
[rs_num
].inactive
.ticket
!= ticket
)
905 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
906 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
907 rs
->rules
[rs_num
].inactive
.rcount
--;
909 rs
->rules
[rs_num
].inactive
.open
= 0;
913 #define PF_MD5_UPD(st, elm) \
914 MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))
916 #define PF_MD5_UPD_STR(st, elm) \
917 MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))
919 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
920 (stor) = htonl((st)->elm); \
921 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
924 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
925 (stor) = htons((st)->elm); \
926 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
930 pf_hash_rule_addr(MD5_CTX
*ctx
, struct pf_rule_addr
*pfr
, u_int8_t proto
)
932 PF_MD5_UPD(pfr
, addr
.type
);
933 switch (pfr
->addr
.type
) {
934 case PF_ADDR_DYNIFTL
:
935 PF_MD5_UPD(pfr
, addr
.v
.ifname
);
936 PF_MD5_UPD(pfr
, addr
.iflags
);
939 PF_MD5_UPD(pfr
, addr
.v
.tblname
);
941 case PF_ADDR_ADDRMASK
:
943 PF_MD5_UPD(pfr
, addr
.v
.a
.addr
.addr32
);
944 PF_MD5_UPD(pfr
, addr
.v
.a
.mask
.addr32
);
946 case PF_ADDR_RTLABEL
:
947 PF_MD5_UPD(pfr
, addr
.v
.rtlabelname
);
954 PF_MD5_UPD(pfr
, xport
.range
.port
[0]);
955 PF_MD5_UPD(pfr
, xport
.range
.port
[1]);
956 PF_MD5_UPD(pfr
, xport
.range
.op
);
963 PF_MD5_UPD(pfr
, neg
);
967 pf_hash_rule(MD5_CTX
*ctx
, struct pf_rule
*rule
)
972 pf_hash_rule_addr(ctx
, &rule
->src
, rule
->proto
);
973 pf_hash_rule_addr(ctx
, &rule
->dst
, rule
->proto
);
974 PF_MD5_UPD_STR(rule
, label
);
975 PF_MD5_UPD_STR(rule
, ifname
);
976 PF_MD5_UPD_STR(rule
, match_tagname
);
977 PF_MD5_UPD_HTONS(rule
, match_tag
, x
); /* dup? */
978 PF_MD5_UPD_HTONL(rule
, os_fingerprint
, y
);
979 PF_MD5_UPD_HTONL(rule
, prob
, y
);
980 PF_MD5_UPD_HTONL(rule
, uid
.uid
[0], y
);
981 PF_MD5_UPD_HTONL(rule
, uid
.uid
[1], y
);
982 PF_MD5_UPD(rule
, uid
.op
);
983 PF_MD5_UPD_HTONL(rule
, gid
.gid
[0], y
);
984 PF_MD5_UPD_HTONL(rule
, gid
.gid
[1], y
);
985 PF_MD5_UPD(rule
, gid
.op
);
986 PF_MD5_UPD_HTONL(rule
, rule_flag
, y
);
987 PF_MD5_UPD(rule
, action
);
988 PF_MD5_UPD(rule
, direction
);
989 PF_MD5_UPD(rule
, af
);
990 PF_MD5_UPD(rule
, quick
);
991 PF_MD5_UPD(rule
, ifnot
);
992 PF_MD5_UPD(rule
, match_tag_not
);
993 PF_MD5_UPD(rule
, natpass
);
994 PF_MD5_UPD(rule
, keep_state
);
995 PF_MD5_UPD(rule
, proto
);
996 PF_MD5_UPD(rule
, type
);
997 PF_MD5_UPD(rule
, code
);
998 PF_MD5_UPD(rule
, flags
);
999 PF_MD5_UPD(rule
, flagset
);
1000 PF_MD5_UPD(rule
, allow_opts
);
1001 PF_MD5_UPD(rule
, rt
);
1002 PF_MD5_UPD(rule
, tos
);
1006 pf_commit_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
1008 struct pf_ruleset
*rs
;
1009 struct pf_rule
*rule
, **old_array
, *r
;
1010 struct pf_rulequeue
*old_rules
;
1012 u_int32_t old_rcount
;
1014 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1016 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
1018 rs
= pf_find_ruleset(anchor
);
1019 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
1020 ticket
!= rs
->rules
[rs_num
].inactive
.ticket
)
1023 /* Calculate checksum for the main ruleset */
1024 if (rs
== &pf_main_ruleset
) {
1025 error
= pf_setup_pfsync_matching(rs
);
1030 /* Swap rules, keep the old. */
1031 old_rules
= rs
->rules
[rs_num
].active
.ptr
;
1032 old_rcount
= rs
->rules
[rs_num
].active
.rcount
;
1033 old_array
= rs
->rules
[rs_num
].active
.ptr_array
;
1035 if(old_rcount
!= 0) {
1036 r
= TAILQ_FIRST(rs
->rules
[rs_num
].active
.ptr
);
1038 if (r
->rule_flag
& PFRULE_PFM
)
1040 r
= TAILQ_NEXT(r
, entries
);
1045 rs
->rules
[rs_num
].active
.ptr
=
1046 rs
->rules
[rs_num
].inactive
.ptr
;
1047 rs
->rules
[rs_num
].active
.ptr_array
=
1048 rs
->rules
[rs_num
].inactive
.ptr_array
;
1049 rs
->rules
[rs_num
].active
.rcount
=
1050 rs
->rules
[rs_num
].inactive
.rcount
;
1051 rs
->rules
[rs_num
].inactive
.ptr
= old_rules
;
1052 rs
->rules
[rs_num
].inactive
.ptr_array
= old_array
;
1053 rs
->rules
[rs_num
].inactive
.rcount
= old_rcount
;
1055 rs
->rules
[rs_num
].active
.ticket
=
1056 rs
->rules
[rs_num
].inactive
.ticket
;
1057 pf_calc_skip_steps(rs
->rules
[rs_num
].active
.ptr
);
1060 /* Purge the old rule list. */
1061 while ((rule
= TAILQ_FIRST(old_rules
)) != NULL
)
1062 pf_rm_rule(old_rules
, rule
);
1063 if (rs
->rules
[rs_num
].inactive
.ptr_array
)
1064 _FREE(rs
->rules
[rs_num
].inactive
.ptr_array
, M_TEMP
);
1065 rs
->rules
[rs_num
].inactive
.ptr_array
= NULL
;
1066 rs
->rules
[rs_num
].inactive
.rcount
= 0;
1067 rs
->rules
[rs_num
].inactive
.open
= 0;
1068 pf_remove_if_empty_ruleset(rs
);
1073 pf_rule_copyin(struct pf_rule
*src
, struct pf_rule
*dst
, struct proc
*p
,
1076 bcopy(src
, dst
, sizeof (struct pf_rule
));
1078 dst
->label
[sizeof (dst
->label
) - 1] = '\0';
1079 dst
->ifname
[sizeof (dst
->ifname
) - 1] = '\0';
1080 dst
->qname
[sizeof (dst
->qname
) - 1] = '\0';
1081 dst
->pqname
[sizeof (dst
->pqname
) - 1] = '\0';
1082 dst
->tagname
[sizeof (dst
->tagname
) - 1] = '\0';
1083 dst
->match_tagname
[sizeof (dst
->match_tagname
) - 1] = '\0';
1084 dst
->overload_tblname
[sizeof (dst
->overload_tblname
) - 1] = '\0';
1086 dst
->cuid
= kauth_cred_getuid(p
->p_ucred
);
1087 dst
->cpid
= p
->p_pid
;
1091 dst
->overload_tbl
= NULL
;
1093 TAILQ_INIT(&dst
->rpool
.list
);
1094 dst
->rpool
.cur
= NULL
;
1096 /* initialize refcounting */
1100 dst
->entries
.tqe_prev
= NULL
;
1101 dst
->entries
.tqe_next
= NULL
;
1102 if ((uint8_t)minordev
== PFDEV_PFM
)
1103 dst
->rule_flag
|= PFRULE_PFM
;
1107 pf_rule_copyout(struct pf_rule
*src
, struct pf_rule
*dst
)
1109 bcopy(src
, dst
, sizeof (struct pf_rule
));
1113 dst
->overload_tbl
= NULL
;
1115 TAILQ_INIT(&dst
->rpool
.list
);
1116 dst
->rpool
.cur
= NULL
;
1118 dst
->entries
.tqe_prev
= NULL
;
1119 dst
->entries
.tqe_next
= NULL
;
1123 pf_state_export(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1126 uint64_t secs
= pf_time_second();
1127 bzero(sp
, sizeof (struct pfsync_state
));
1129 /* copy from state key */
1130 sp
->lan
.addr
= sk
->lan
.addr
;
1131 sp
->lan
.xport
= sk
->lan
.xport
;
1132 sp
->gwy
.addr
= sk
->gwy
.addr
;
1133 sp
->gwy
.xport
= sk
->gwy
.xport
;
1134 sp
->ext_lan
.addr
= sk
->ext_lan
.addr
;
1135 sp
->ext_lan
.xport
= sk
->ext_lan
.xport
;
1136 sp
->ext_gwy
.addr
= sk
->ext_gwy
.addr
;
1137 sp
->ext_gwy
.xport
= sk
->ext_gwy
.xport
;
1138 sp
->proto_variant
= sk
->proto_variant
;
1140 sp
->proto
= sk
->proto
;
1141 sp
->af_lan
= sk
->af_lan
;
1142 sp
->af_gwy
= sk
->af_gwy
;
1143 sp
->direction
= sk
->direction
;
1144 sp
->flowhash
= sk
->flowhash
;
1146 /* copy from state */
1147 memcpy(&sp
->id
, &s
->id
, sizeof (sp
->id
));
1148 sp
->creatorid
= s
->creatorid
;
1149 strlcpy(sp
->ifname
, s
->kif
->pfik_name
, sizeof (sp
->ifname
));
1150 pf_state_peer_to_pfsync(&s
->src
, &sp
->src
);
1151 pf_state_peer_to_pfsync(&s
->dst
, &sp
->dst
);
1153 sp
->rule
= s
->rule
.ptr
->nr
;
1154 sp
->nat_rule
= (s
->nat_rule
.ptr
== NULL
) ?
1155 (unsigned)-1 : s
->nat_rule
.ptr
->nr
;
1156 sp
->anchor
= (s
->anchor
.ptr
== NULL
) ?
1157 (unsigned)-1 : s
->anchor
.ptr
->nr
;
1159 pf_state_counter_to_pfsync(s
->bytes
[0], sp
->bytes
[0]);
1160 pf_state_counter_to_pfsync(s
->bytes
[1], sp
->bytes
[1]);
1161 pf_state_counter_to_pfsync(s
->packets
[0], sp
->packets
[0]);
1162 pf_state_counter_to_pfsync(s
->packets
[1], sp
->packets
[1]);
1163 sp
->creation
= secs
- s
->creation
;
1164 sp
->expire
= pf_state_expires(s
);
1166 sp
->allow_opts
= s
->allow_opts
;
1167 sp
->timeout
= s
->timeout
;
1170 sp
->sync_flags
|= PFSYNC_FLAG_SRCNODE
;
1171 if (s
->nat_src_node
)
1172 sp
->sync_flags
|= PFSYNC_FLAG_NATSRCNODE
;
1174 if (sp
->expire
> secs
)
1182 pf_state_import(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1185 /* copy to state key */
1186 sk
->lan
.addr
= sp
->lan
.addr
;
1187 sk
->lan
.xport
= sp
->lan
.xport
;
1188 sk
->gwy
.addr
= sp
->gwy
.addr
;
1189 sk
->gwy
.xport
= sp
->gwy
.xport
;
1190 sk
->ext_lan
.addr
= sp
->ext_lan
.addr
;
1191 sk
->ext_lan
.xport
= sp
->ext_lan
.xport
;
1192 sk
->ext_gwy
.addr
= sp
->ext_gwy
.addr
;
1193 sk
->ext_gwy
.xport
= sp
->ext_gwy
.xport
;
1194 sk
->proto_variant
= sp
->proto_variant
;
1196 sk
->proto
= sp
->proto
;
1197 sk
->af_lan
= sp
->af_lan
;
1198 sk
->af_gwy
= sp
->af_gwy
;
1199 sk
->direction
= sp
->direction
;
1200 sk
->flowhash
= pf_calc_state_key_flowhash(sk
);
1203 memcpy(&s
->id
, &sp
->id
, sizeof (sp
->id
));
1204 s
->creatorid
= sp
->creatorid
;
1205 pf_state_peer_from_pfsync(&sp
->src
, &s
->src
);
1206 pf_state_peer_from_pfsync(&sp
->dst
, &s
->dst
);
1208 s
->rule
.ptr
= &pf_default_rule
;
1209 s
->nat_rule
.ptr
= NULL
;
1210 s
->anchor
.ptr
= NULL
;
1212 s
->creation
= pf_time_second();
1213 s
->expire
= pf_time_second();
1215 s
->expire
-= pf_default_rule
.timeout
[sp
->timeout
] - sp
->expire
;
1217 s
->packets
[0] = s
->packets
[1] = 0;
1218 s
->bytes
[0] = s
->bytes
[1] = 0;
1222 pf_pooladdr_copyin(struct pf_pooladdr
*src
, struct pf_pooladdr
*dst
)
1224 bcopy(src
, dst
, sizeof (struct pf_pooladdr
));
1226 dst
->entries
.tqe_prev
= NULL
;
1227 dst
->entries
.tqe_next
= NULL
;
1228 dst
->ifname
[sizeof (dst
->ifname
) - 1] = '\0';
1233 pf_pooladdr_copyout(struct pf_pooladdr
*src
, struct pf_pooladdr
*dst
)
1235 bcopy(src
, dst
, sizeof (struct pf_pooladdr
));
1237 dst
->entries
.tqe_prev
= NULL
;
1238 dst
->entries
.tqe_next
= NULL
;
1243 pf_setup_pfsync_matching(struct pf_ruleset
*rs
)
1246 struct pf_rule
*rule
;
1248 u_int8_t digest
[PF_MD5_DIGEST_LENGTH
];
1251 for (rs_cnt
= 0; rs_cnt
< PF_RULESET_MAX
; rs_cnt
++) {
1252 /* XXX PF_RULESET_SCRUB as well? */
1253 if (rs_cnt
== PF_RULESET_SCRUB
)
1256 if (rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1257 _FREE(rs
->rules
[rs_cnt
].inactive
.ptr_array
, M_TEMP
);
1258 rs
->rules
[rs_cnt
].inactive
.ptr_array
= NULL
;
1260 if (rs
->rules
[rs_cnt
].inactive
.rcount
) {
1261 rs
->rules
[rs_cnt
].inactive
.ptr_array
=
1262 _MALLOC(sizeof (caddr_t
) *
1263 rs
->rules
[rs_cnt
].inactive
.rcount
,
1266 if (!rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1270 TAILQ_FOREACH(rule
, rs
->rules
[rs_cnt
].inactive
.ptr
,
1272 pf_hash_rule(&ctx
, rule
);
1273 (rs
->rules
[rs_cnt
].inactive
.ptr_array
)[rule
->nr
] = rule
;
1277 MD5Final(digest
, &ctx
);
1278 memcpy(pf_status
.pf_chksum
, digest
, sizeof (pf_status
.pf_chksum
));
1285 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1287 VERIFY(pf_is_enabled
== 0);
1290 pf_status
.running
= 1;
1291 pf_status
.since
= pf_calendar_time_second();
1292 if (pf_status
.stateid
== 0) {
1293 pf_status
.stateid
= pf_time_second();
1294 pf_status
.stateid
= pf_status
.stateid
<< 32;
1296 wakeup(pf_purge_thread_fn
);
1297 DPFPRINTF(PF_DEBUG_MISC
, ("pf: started\n"));
1303 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1305 VERIFY(pf_is_enabled
);
1307 pf_status
.running
= 0;
1309 pf_status
.since
= pf_calendar_time_second();
1310 wakeup(pf_purge_thread_fn
);
1311 DPFPRINTF(PF_DEBUG_MISC
, ("pf: stopped\n"));
1315 pfioctl(dev_t dev
, u_long cmd
, caddr_t addr
, int flags
, struct proc
*p
)
1318 int p64
= proc_is64bit(p
);
1320 int minordev
= minor(dev
);
1322 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1325 /* XXX keep in sync with switch() below */
1326 if (securelevel
> 1)
1333 case DIOCSETSTATUSIF
:
1339 case DIOCINSERTRULE
:
1340 case DIOCDELETERULE
:
1341 case DIOCGETTIMEOUT
:
1342 case DIOCCLRRULECTRS
:
1347 case DIOCGETRULESETS
:
1348 case DIOCGETRULESET
:
1349 case DIOCRGETTABLES
:
1350 case DIOCRGETTSTATS
:
1351 case DIOCRCLRTSTATS
:
1357 case DIOCRGETASTATS
:
1358 case DIOCRCLRASTATS
:
1361 case DIOCGETSRCNODES
:
1362 case DIOCCLRSRCNODES
:
1363 case DIOCIGETIFACES
:
1368 case DIOCRCLRTABLES
:
1369 case DIOCRADDTABLES
:
1370 case DIOCRDELTABLES
:
1371 case DIOCRSETTFLAGS
: {
1374 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1375 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1377 if (pfrio_flags
& PFR_FLAG_DUMMY
)
1378 break; /* dummy operation ok */
1385 if (!(flags
& FWRITE
))
1391 case DIOCGETSTARTERS
:
1398 case DIOCINSERTRULE
:
1399 case DIOCDELETERULE
:
1400 case DIOCGETTIMEOUT
:
1405 case DIOCGETRULESETS
:
1406 case DIOCGETRULESET
:
1408 case DIOCRGETTABLES
:
1409 case DIOCRGETTSTATS
:
1411 case DIOCRGETASTATS
:
1414 case DIOCGETSRCNODES
:
1415 case DIOCIGETIFACES
:
1418 case DIOCRCLRTABLES
:
1419 case DIOCRADDTABLES
:
1420 case DIOCRDELTABLES
:
1421 case DIOCRCLRTSTATS
:
1426 case DIOCRSETTFLAGS
: {
1429 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1430 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1432 if (pfrio_flags
& PFR_FLAG_DUMMY
) {
1433 flags
|= FWRITE
; /* need write lock for dummy */
1434 break; /* dummy operation ok */
1441 bcopy(&((struct pfioc_rule
*)(void *)addr
)->action
,
1442 &action
, sizeof (action
));
1444 if (action
== PF_GET_CLR_CNTR
)
1453 lck_rw_lock_exclusive(pf_perim_lock
);
1455 lck_rw_lock_shared(pf_perim_lock
);
1457 lck_mtx_lock(pf_lock
);
1462 if (pf_status
.running
) {
1464 * Increment the reference for a simple -e enable, so
1465 * that even if other processes drop their references,
1466 * pf will still be available to processes that turned
1467 * it on without taking a reference
1469 if (nr_tokens
== pf_enabled_ref_count
) {
1470 pf_enabled_ref_count
++;
1471 VERIFY(pf_enabled_ref_count
!= 0);
1474 } else if (pf_purge_thread
== NULL
) {
1478 pf_enabled_ref_count
++;
1479 VERIFY(pf_enabled_ref_count
!= 0);
1483 case DIOCSTARTREF
: /* u_int64_t */
1484 if (pf_purge_thread
== NULL
) {
1489 /* small enough to be on stack */
1490 if ((token
= generate_token(p
)) != 0) {
1491 if (pf_is_enabled
== 0) {
1494 pf_enabled_ref_count
++;
1495 VERIFY(pf_enabled_ref_count
!= 0);
1498 DPFPRINTF(PF_DEBUG_URGENT
,
1499 ("pf: unable to generate token\n"));
1501 bcopy(&token
, addr
, sizeof (token
));
1506 if (!pf_status
.running
) {
1510 pf_enabled_ref_count
= 0;
1511 invalidate_all_tokens();
1515 case DIOCSTOPREF
: /* struct pfioc_remove_token */
1516 if (!pf_status
.running
) {
1519 struct pfioc_remove_token pfrt
;
1521 /* small enough to be on stack */
1522 bcopy(addr
, &pfrt
, sizeof (pfrt
));
1523 if ((error
= remove_token(&pfrt
)) == 0) {
1524 VERIFY(pf_enabled_ref_count
!= 0);
1525 pf_enabled_ref_count
--;
1526 /* return currently held references */
1527 pfrt
.refcount
= pf_enabled_ref_count
;
1528 DPFPRINTF(PF_DEBUG_MISC
,
1529 ("pf: enabled refcount decremented\n"));
1532 DPFPRINTF(PF_DEBUG_URGENT
,
1533 ("pf: token mismatch\n"));
1535 bcopy(&pfrt
, addr
, sizeof (pfrt
));
1537 if (error
== 0 && pf_enabled_ref_count
== 0)
1542 case DIOCGETSTARTERS
: { /* struct pfioc_tokens */
1543 PFIOCX_STRUCT_DECL(pfioc_tokens
);
1545 PFIOCX_STRUCT_BEGIN(addr
, pfioc_tokens
, error
= ENOMEM
; break;);
1546 error
= pfioctl_ioc_tokens(cmd
,
1547 PFIOCX_STRUCT_ADDR32(pfioc_tokens
),
1548 PFIOCX_STRUCT_ADDR64(pfioc_tokens
), p
);
1549 PFIOCX_STRUCT_END(pfioc_tokens
, addr
);
1553 case DIOCADDRULE
: /* struct pfioc_rule */
1554 case DIOCGETRULES
: /* struct pfioc_rule */
1555 case DIOCGETRULE
: /* struct pfioc_rule */
1556 case DIOCCHANGERULE
: /* struct pfioc_rule */
1557 case DIOCINSERTRULE
: /* struct pfioc_rule */
1558 case DIOCDELETERULE
: { /* struct pfioc_rule */
1559 struct pfioc_rule
*pr
= NULL
;
1561 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
1562 error
= pfioctl_ioc_rule(cmd
, minordev
, pr
, p
);
1563 PFIOC_STRUCT_END(pr
, addr
);
1567 case DIOCCLRSTATES
: /* struct pfioc_state_kill */
1568 case DIOCKILLSTATES
: { /* struct pfioc_state_kill */
1569 struct pfioc_state_kill
*psk
= NULL
;
1571 PFIOC_STRUCT_BEGIN(addr
, psk
, error
= ENOMEM
; break;);
1572 error
= pfioctl_ioc_state_kill(cmd
, psk
, p
);
1573 PFIOC_STRUCT_END(psk
, addr
);
1577 case DIOCADDSTATE
: /* struct pfioc_state */
1578 case DIOCGETSTATE
: { /* struct pfioc_state */
1579 struct pfioc_state
*ps
= NULL
;
1581 PFIOC_STRUCT_BEGIN(addr
, ps
, error
= ENOMEM
; break;);
1582 error
= pfioctl_ioc_state(cmd
, ps
, p
);
1583 PFIOC_STRUCT_END(ps
, addr
);
1587 case DIOCGETSTATES
: { /* struct pfioc_states */
1588 PFIOCX_STRUCT_DECL(pfioc_states
);
1590 PFIOCX_STRUCT_BEGIN(addr
, pfioc_states
, error
= ENOMEM
; break;);
1591 error
= pfioctl_ioc_states(cmd
,
1592 PFIOCX_STRUCT_ADDR32(pfioc_states
),
1593 PFIOCX_STRUCT_ADDR64(pfioc_states
), p
);
1594 PFIOCX_STRUCT_END(pfioc_states
, addr
);
1598 case DIOCGETSTATUS
: { /* struct pf_status */
1599 struct pf_status
*s
= NULL
;
1601 PFIOC_STRUCT_BEGIN(&pf_status
, s
, error
= ENOMEM
; break;);
1602 pfi_update_status(s
->ifname
, s
);
1603 PFIOC_STRUCT_END(s
, addr
);
1607 case DIOCSETSTATUSIF
: { /* struct pfioc_if */
1608 struct pfioc_if
*pi
= (struct pfioc_if
*)(void *)addr
;
1610 /* OK for unaligned accesses */
1611 if (pi
->ifname
[0] == 0) {
1612 bzero(pf_status
.ifname
, IFNAMSIZ
);
1615 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
1619 case DIOCCLRSTATUS
: {
1620 bzero(pf_status
.counters
, sizeof (pf_status
.counters
));
1621 bzero(pf_status
.fcounters
, sizeof (pf_status
.fcounters
));
1622 bzero(pf_status
.scounters
, sizeof (pf_status
.scounters
));
1623 pf_status
.since
= pf_calendar_time_second();
1624 if (*pf_status
.ifname
)
1625 pfi_update_status(pf_status
.ifname
, NULL
);
1629 case DIOCNATLOOK
: { /* struct pfioc_natlook */
1630 struct pfioc_natlook
*pnl
= NULL
;
1632 PFIOC_STRUCT_BEGIN(addr
, pnl
, error
= ENOMEM
; break;);
1633 error
= pfioctl_ioc_natlook(cmd
, pnl
, p
);
1634 PFIOC_STRUCT_END(pnl
, addr
);
1638 case DIOCSETTIMEOUT
: /* struct pfioc_tm */
1639 case DIOCGETTIMEOUT
: { /* struct pfioc_tm */
1642 /* small enough to be on stack */
1643 bcopy(addr
, &pt
, sizeof (pt
));
1644 error
= pfioctl_ioc_tm(cmd
, &pt
, p
);
1645 bcopy(&pt
, addr
, sizeof (pt
));
1649 case DIOCGETLIMIT
: /* struct pfioc_limit */
1650 case DIOCSETLIMIT
: { /* struct pfioc_limit */
1651 struct pfioc_limit pl
;
1653 /* small enough to be on stack */
1654 bcopy(addr
, &pl
, sizeof (pl
));
1655 error
= pfioctl_ioc_limit(cmd
, &pl
, p
);
1656 bcopy(&pl
, addr
, sizeof (pl
));
1660 case DIOCSETDEBUG
: { /* u_int32_t */
1661 bcopy(addr
, &pf_status
.debug
, sizeof (u_int32_t
));
1665 case DIOCCLRRULECTRS
: {
1666 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1667 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
1668 struct pf_rule
*rule
;
1671 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
) {
1672 rule
->evaluations
= 0;
1673 rule
->packets
[0] = rule
->packets
[1] = 0;
1674 rule
->bytes
[0] = rule
->bytes
[1] = 0;
1679 case DIOCGIFSPEED
: {
1680 struct pf_ifspeed
*psp
= (struct pf_ifspeed
*)(void *)addr
;
1681 struct pf_ifspeed ps
;
1685 if (psp
->ifname
[0] != '\0') {
1686 /* Can we completely trust user-land? */
1687 strlcpy(ps
.ifname
, psp
->ifname
, IFNAMSIZ
);
1688 ps
.ifname
[IFNAMSIZ
- 1] = '\0';
1689 ifp
= ifunit(ps
.ifname
);
1691 baudrate
= ifp
->if_output_bw
.max_bw
;
1692 bcopy(&baudrate
, &psp
->baudrate
,
1703 case DIOCBEGINADDRS
: /* struct pfioc_pooladdr */
1704 case DIOCADDADDR
: /* struct pfioc_pooladdr */
1705 case DIOCGETADDRS
: /* struct pfioc_pooladdr */
1706 case DIOCGETADDR
: /* struct pfioc_pooladdr */
1707 case DIOCCHANGEADDR
: { /* struct pfioc_pooladdr */
1708 struct pfioc_pooladdr
*pp
= NULL
;
1710 PFIOC_STRUCT_BEGIN(addr
, pp
, error
= ENOMEM
; break;)
1711 error
= pfioctl_ioc_pooladdr(cmd
, pp
, p
);
1712 PFIOC_STRUCT_END(pp
, addr
);
1716 case DIOCGETRULESETS
: /* struct pfioc_ruleset */
1717 case DIOCGETRULESET
: { /* struct pfioc_ruleset */
1718 struct pfioc_ruleset
*pr
= NULL
;
1720 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
1721 error
= pfioctl_ioc_ruleset(cmd
, pr
, p
);
1722 PFIOC_STRUCT_END(pr
, addr
);
1726 case DIOCRCLRTABLES
: /* struct pfioc_table */
1727 case DIOCRADDTABLES
: /* struct pfioc_table */
1728 case DIOCRDELTABLES
: /* struct pfioc_table */
1729 case DIOCRGETTABLES
: /* struct pfioc_table */
1730 case DIOCRGETTSTATS
: /* struct pfioc_table */
1731 case DIOCRCLRTSTATS
: /* struct pfioc_table */
1732 case DIOCRSETTFLAGS
: /* struct pfioc_table */
1733 case DIOCRCLRADDRS
: /* struct pfioc_table */
1734 case DIOCRADDADDRS
: /* struct pfioc_table */
1735 case DIOCRDELADDRS
: /* struct pfioc_table */
1736 case DIOCRSETADDRS
: /* struct pfioc_table */
1737 case DIOCRGETADDRS
: /* struct pfioc_table */
1738 case DIOCRGETASTATS
: /* struct pfioc_table */
1739 case DIOCRCLRASTATS
: /* struct pfioc_table */
1740 case DIOCRTSTADDRS
: /* struct pfioc_table */
1741 case DIOCRINADEFINE
: { /* struct pfioc_table */
1742 PFIOCX_STRUCT_DECL(pfioc_table
);
1744 PFIOCX_STRUCT_BEGIN(addr
, pfioc_table
, error
= ENOMEM
; break;);
1745 error
= pfioctl_ioc_table(cmd
,
1746 PFIOCX_STRUCT_ADDR32(pfioc_table
),
1747 PFIOCX_STRUCT_ADDR64(pfioc_table
), p
);
1748 PFIOCX_STRUCT_END(pfioc_table
, addr
);
1752 case DIOCOSFPADD
: /* struct pf_osfp_ioctl */
1753 case DIOCOSFPGET
: { /* struct pf_osfp_ioctl */
1754 struct pf_osfp_ioctl
*io
= NULL
;
1756 PFIOC_STRUCT_BEGIN(addr
, io
, error
= ENOMEM
; break;);
1757 if (cmd
== DIOCOSFPADD
) {
1758 error
= pf_osfp_add(io
);
1760 VERIFY(cmd
== DIOCOSFPGET
);
1761 error
= pf_osfp_get(io
);
1763 PFIOC_STRUCT_END(io
, addr
);
1767 case DIOCXBEGIN
: /* struct pfioc_trans */
1768 case DIOCXROLLBACK
: /* struct pfioc_trans */
1769 case DIOCXCOMMIT
: { /* struct pfioc_trans */
1770 PFIOCX_STRUCT_DECL(pfioc_trans
);
1772 PFIOCX_STRUCT_BEGIN(addr
, pfioc_trans
, error
= ENOMEM
; break;);
1773 error
= pfioctl_ioc_trans(cmd
,
1774 PFIOCX_STRUCT_ADDR32(pfioc_trans
),
1775 PFIOCX_STRUCT_ADDR64(pfioc_trans
), p
);
1776 PFIOCX_STRUCT_END(pfioc_trans
, addr
);
1780 case DIOCGETSRCNODES
: { /* struct pfioc_src_nodes */
1781 PFIOCX_STRUCT_DECL(pfioc_src_nodes
);
1783 PFIOCX_STRUCT_BEGIN(addr
, pfioc_src_nodes
,
1784 error
= ENOMEM
; break;);
1785 error
= pfioctl_ioc_src_nodes(cmd
,
1786 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes
),
1787 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes
), p
);
1788 PFIOCX_STRUCT_END(pfioc_src_nodes
, addr
);
1792 case DIOCCLRSRCNODES
: {
1793 struct pf_src_node
*n
;
1794 struct pf_state
*state
;
1796 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
1797 state
->src_node
= NULL
;
1798 state
->nat_src_node
= NULL
;
1800 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
1804 pf_purge_expired_src_nodes();
1805 pf_status
.src_nodes
= 0;
1809 case DIOCKILLSRCNODES
: { /* struct pfioc_src_node_kill */
1810 struct pfioc_src_node_kill
*psnk
= NULL
;
1812 PFIOC_STRUCT_BEGIN(addr
, psnk
, error
= ENOMEM
; break;);
1813 error
= pfioctl_ioc_src_node_kill(cmd
, psnk
, p
);
1814 PFIOC_STRUCT_END(psnk
, addr
);
1818 case DIOCSETHOSTID
: { /* u_int32_t */
1821 /* small enough to be on stack */
1822 bcopy(addr
, &hid
, sizeof (hid
));
1824 pf_status
.hostid
= random();
1826 pf_status
.hostid
= hid
;
1834 case DIOCIGETIFACES
: /* struct pfioc_iface */
1835 case DIOCSETIFFLAG
: /* struct pfioc_iface */
1836 case DIOCCLRIFFLAG
: { /* struct pfioc_iface */
1837 PFIOCX_STRUCT_DECL(pfioc_iface
);
1839 PFIOCX_STRUCT_BEGIN(addr
, pfioc_iface
, error
= ENOMEM
; break;);
1840 error
= pfioctl_ioc_iface(cmd
,
1841 PFIOCX_STRUCT_ADDR32(pfioc_iface
),
1842 PFIOCX_STRUCT_ADDR64(pfioc_iface
), p
);
1843 PFIOCX_STRUCT_END(pfioc_iface
, addr
);
1852 lck_mtx_unlock(pf_lock
);
1853 lck_rw_done(pf_perim_lock
);
1859 pfioctl_ioc_table(u_long cmd
, struct pfioc_table_32
*io32
,
1860 struct pfioc_table_64
*io64
, struct proc
*p
)
1862 int p64
= proc_is64bit(p
);
1869 * 64-bit structure processing
1872 case DIOCRCLRTABLES
:
1873 if (io64
->pfrio_esize
!= 0) {
1877 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1878 error
= pfr_clr_tables(&io64
->pfrio_table
, &io64
->pfrio_ndel
,
1879 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1882 case DIOCRADDTABLES
:
1883 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
1887 error
= pfr_add_tables(io64
->pfrio_buffer
, io64
->pfrio_size
,
1888 &io64
->pfrio_nadd
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1891 case DIOCRDELTABLES
:
1892 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
1896 error
= pfr_del_tables(io64
->pfrio_buffer
, io64
->pfrio_size
,
1897 &io64
->pfrio_ndel
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1900 case DIOCRGETTABLES
:
1901 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
1905 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1906 error
= pfr_get_tables(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1907 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1910 case DIOCRGETTSTATS
:
1911 if (io64
->pfrio_esize
!= sizeof (struct pfr_tstats
)) {
1915 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1916 error
= pfr_get_tstats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1917 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1920 case DIOCRCLRTSTATS
:
1921 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
1925 error
= pfr_clr_tstats(io64
->pfrio_buffer
, io64
->pfrio_size
,
1926 &io64
->pfrio_nzero
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1929 case DIOCRSETTFLAGS
:
1930 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
1934 error
= pfr_set_tflags(io64
->pfrio_buffer
, io64
->pfrio_size
,
1935 io64
->pfrio_setflag
, io64
->pfrio_clrflag
,
1936 &io64
->pfrio_nchange
, &io64
->pfrio_ndel
,
1937 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1941 if (io64
->pfrio_esize
!= 0) {
1945 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1946 error
= pfr_clr_addrs(&io64
->pfrio_table
, &io64
->pfrio_ndel
,
1947 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1951 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
1955 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1956 error
= pfr_add_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1957 io64
->pfrio_size
, &io64
->pfrio_nadd
, io64
->pfrio_flags
|
1958 PFR_FLAG_USERIOCTL
);
1962 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
1966 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1967 error
= pfr_del_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1968 io64
->pfrio_size
, &io64
->pfrio_ndel
, io64
->pfrio_flags
|
1969 PFR_FLAG_USERIOCTL
);
1973 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
1977 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1978 error
= pfr_set_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1979 io64
->pfrio_size
, &io64
->pfrio_size2
, &io64
->pfrio_nadd
,
1980 &io64
->pfrio_ndel
, &io64
->pfrio_nchange
, io64
->pfrio_flags
|
1981 PFR_FLAG_USERIOCTL
, 0);
1985 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
1989 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
1990 error
= pfr_get_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
1991 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
1994 case DIOCRGETASTATS
:
1995 if (io64
->pfrio_esize
!= sizeof (struct pfr_astats
)) {
1999 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2000 error
= pfr_get_astats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2001 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2004 case DIOCRCLRASTATS
:
2005 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2009 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2010 error
= pfr_clr_astats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2011 io64
->pfrio_size
, &io64
->pfrio_nzero
, io64
->pfrio_flags
|
2012 PFR_FLAG_USERIOCTL
);
2016 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2020 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2021 error
= pfr_tst_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2022 io64
->pfrio_size
, &io64
->pfrio_nmatch
, io64
->pfrio_flags
|
2023 PFR_FLAG_USERIOCTL
);
2026 case DIOCRINADEFINE
:
2027 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2031 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2032 error
= pfr_ina_define(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2033 io64
->pfrio_size
, &io64
->pfrio_nadd
, &io64
->pfrio_naddr
,
2034 io64
->pfrio_ticket
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2045 * 32-bit structure processing
2048 case DIOCRCLRTABLES
:
2049 if (io32
->pfrio_esize
!= 0) {
2053 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2054 error
= pfr_clr_tables(&io32
->pfrio_table
, &io32
->pfrio_ndel
,
2055 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2058 case DIOCRADDTABLES
:
2059 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2063 error
= pfr_add_tables(io32
->pfrio_buffer
, io32
->pfrio_size
,
2064 &io32
->pfrio_nadd
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2067 case DIOCRDELTABLES
:
2068 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2072 error
= pfr_del_tables(io32
->pfrio_buffer
, io32
->pfrio_size
,
2073 &io32
->pfrio_ndel
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2076 case DIOCRGETTABLES
:
2077 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2081 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2082 error
= pfr_get_tables(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2083 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2086 case DIOCRGETTSTATS
:
2087 if (io32
->pfrio_esize
!= sizeof (struct pfr_tstats
)) {
2091 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2092 error
= pfr_get_tstats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2093 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2096 case DIOCRCLRTSTATS
:
2097 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2101 error
= pfr_clr_tstats(io32
->pfrio_buffer
, io32
->pfrio_size
,
2102 &io32
->pfrio_nzero
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2105 case DIOCRSETTFLAGS
:
2106 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2110 error
= pfr_set_tflags(io32
->pfrio_buffer
, io32
->pfrio_size
,
2111 io32
->pfrio_setflag
, io32
->pfrio_clrflag
,
2112 &io32
->pfrio_nchange
, &io32
->pfrio_ndel
,
2113 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2117 if (io32
->pfrio_esize
!= 0) {
2121 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2122 error
= pfr_clr_addrs(&io32
->pfrio_table
, &io32
->pfrio_ndel
,
2123 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2127 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2131 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2132 error
= pfr_add_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2133 io32
->pfrio_size
, &io32
->pfrio_nadd
, io32
->pfrio_flags
|
2134 PFR_FLAG_USERIOCTL
);
2138 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2142 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2143 error
= pfr_del_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2144 io32
->pfrio_size
, &io32
->pfrio_ndel
, io32
->pfrio_flags
|
2145 PFR_FLAG_USERIOCTL
);
2149 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2153 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2154 error
= pfr_set_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2155 io32
->pfrio_size
, &io32
->pfrio_size2
, &io32
->pfrio_nadd
,
2156 &io32
->pfrio_ndel
, &io32
->pfrio_nchange
, io32
->pfrio_flags
|
2157 PFR_FLAG_USERIOCTL
, 0);
2161 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2165 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2166 error
= pfr_get_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2167 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2170 case DIOCRGETASTATS
:
2171 if (io32
->pfrio_esize
!= sizeof (struct pfr_astats
)) {
2175 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2176 error
= pfr_get_astats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2177 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2180 case DIOCRCLRASTATS
:
2181 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2185 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2186 error
= pfr_clr_astats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2187 io32
->pfrio_size
, &io32
->pfrio_nzero
, io32
->pfrio_flags
|
2188 PFR_FLAG_USERIOCTL
);
2192 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2196 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2197 error
= pfr_tst_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2198 io32
->pfrio_size
, &io32
->pfrio_nmatch
, io32
->pfrio_flags
|
2199 PFR_FLAG_USERIOCTL
);
2202 case DIOCRINADEFINE
:
2203 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2207 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2208 error
= pfr_ina_define(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2209 io32
->pfrio_size
, &io32
->pfrio_nadd
, &io32
->pfrio_naddr
,
2210 io32
->pfrio_ticket
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2223 pfioctl_ioc_tokens(u_long cmd
, struct pfioc_tokens_32
*tok32
,
2224 struct pfioc_tokens_64
*tok64
, struct proc
*p
)
2226 struct pfioc_token
*tokens
;
2227 struct pfioc_kernel_token
*entry
, *tmp
;
2228 user_addr_t token_buf
;
2229 int ocnt
, cnt
, error
= 0, p64
= proc_is64bit(p
);
2233 case DIOCGETSTARTERS
: {
2236 if (nr_tokens
== 0) {
2241 size
= sizeof (struct pfioc_token
) * nr_tokens
;
2242 ocnt
= cnt
= (p64
? tok64
->size
: tok32
->size
);
2251 token_buf
= (p64
? tok64
->pgt_buf
: tok32
->pgt_buf
);
2252 tokens
= _MALLOC(size
, M_TEMP
, M_WAITOK
|M_ZERO
);
2253 if (tokens
== NULL
) {
2258 ptr
= (void *)tokens
;
2259 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
2260 struct pfioc_token
*t
;
2262 if ((unsigned)cnt
< sizeof (*tokens
))
2263 break; /* no more buffer space left */
2265 t
= (struct pfioc_token
*)(void *)ptr
;
2266 t
->token_value
= entry
->token
.token_value
;
2267 t
->timestamp
= entry
->token
.timestamp
;
2268 t
->pid
= entry
->token
.pid
;
2269 bcopy(entry
->token
.proc_name
, t
->proc_name
,
2270 PFTOK_PROCNAME_LEN
);
2271 ptr
+= sizeof (struct pfioc_token
);
2273 cnt
-= sizeof (struct pfioc_token
);
2277 error
= copyout(tokens
, token_buf
, ocnt
- cnt
);
2280 tok64
->size
= ocnt
- cnt
;
2282 tok32
->size
= ocnt
- cnt
;
2284 _FREE(tokens
, M_TEMP
);
2297 pf_expire_states_and_src_nodes(struct pf_rule
*rule
)
2299 struct pf_state
*state
;
2300 struct pf_src_node
*sn
;
2303 /* expire the states */
2304 state
= TAILQ_FIRST(&state_list
);
2306 if (state
->rule
.ptr
== rule
)
2307 state
->timeout
= PFTM_PURGE
;
2308 state
= TAILQ_NEXT(state
, entry_list
);
2310 pf_purge_expired_states(pf_status
.states
);
2312 /* expire the src_nodes */
2313 RB_FOREACH(sn
, pf_src_tree
, &tree_src_tracking
) {
2314 if (sn
->rule
.ptr
!= rule
)
2316 if (sn
->states
!= 0) {
2317 RB_FOREACH(state
, pf_state_tree_id
,
2319 if (state
->src_node
== sn
)
2320 state
->src_node
= NULL
;
2321 if (state
->nat_src_node
== sn
)
2322 state
->nat_src_node
= NULL
;
2330 pf_purge_expired_src_nodes();
2334 pf_delete_rule_from_ruleset(struct pf_ruleset
*ruleset
, int rs_num
,
2335 struct pf_rule
*rule
)
2340 pf_expire_states_and_src_nodes(rule
);
2342 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, rule
);
2343 if (ruleset
->rules
[rs_num
].active
.rcount
-- == 0)
2344 panic("%s: rcount value broken!", __func__
);
2345 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
2349 r
= TAILQ_NEXT(r
, entries
);
2355 pf_ruleset_cleanup(struct pf_ruleset
*ruleset
, int rs
)
2357 pf_calc_skip_steps(ruleset
->rules
[rs
].active
.ptr
);
2358 ruleset
->rules
[rs
].active
.ticket
=
2359 ++ruleset
->rules
[rs
].inactive
.ticket
;
2363 * req_dev encodes the PF interface. Currently, possible values are
2367 pf_delete_rule_by_ticket(struct pfioc_rule
*pr
, u_int32_t req_dev
)
2369 struct pf_ruleset
*ruleset
;
2370 struct pf_rule
*rule
= NULL
;
2375 is_anchor
= (pr
->anchor_call
[0] != '\0');
2376 if ((ruleset
= pf_find_ruleset_with_owner(pr
->anchor
,
2377 pr
->rule
.owner
, is_anchor
, &error
)) == NULL
)
2380 for (i
= 0; i
< PF_RULESET_MAX
&& rule
== NULL
; i
++) {
2381 rule
= TAILQ_FIRST(ruleset
->rules
[i
].active
.ptr
);
2382 while (rule
&& (rule
->ticket
!= pr
->rule
.ticket
))
2383 rule
= TAILQ_NEXT(rule
, entries
);
2390 if (strcmp(rule
->owner
, pr
->rule
.owner
))
2394 if (rule
->anchor
&& (ruleset
!= &pf_main_ruleset
) &&
2395 ((strcmp(ruleset
->anchor
->owner
, "")) == 0) &&
2396 ((ruleset
->rules
[i
].active
.rcount
- 1) == 0)) {
2397 /* set rule & ruleset to parent and repeat */
2398 struct pf_rule
*delete_rule
= rule
;
2399 struct pf_ruleset
*delete_ruleset
= ruleset
;
2401 #define parent_ruleset ruleset->anchor->parent->ruleset
2402 if (ruleset
->anchor
->parent
== NULL
)
2403 ruleset
= &pf_main_ruleset
;
2405 ruleset
= &parent_ruleset
;
2407 rule
= TAILQ_FIRST(ruleset
->rules
[i
].active
.ptr
);
2409 (rule
->anchor
!= delete_ruleset
->anchor
))
2410 rule
= TAILQ_NEXT(rule
, entries
);
2412 panic("%s: rule not found!", __func__
);
2415 * if reqest device != rule's device, bail :
2416 * with error if ticket matches;
2417 * without error if ticket doesn't match (i.e. its just cleanup)
2419 if ((rule
->rule_flag
& PFRULE_PFM
) ^ req_dev
) {
2420 if (rule
->ticket
!= pr
->rule
.ticket
) {
2427 if (delete_rule
->rule_flag
& PFRULE_PFM
) {
2431 pf_delete_rule_from_ruleset(delete_ruleset
,
2433 delete_ruleset
->rules
[i
].active
.ticket
=
2434 ++delete_ruleset
->rules
[i
].inactive
.ticket
;
2438 * process deleting rule only if device that added the
2439 * rule matches device that issued the request
2441 if ((rule
->rule_flag
& PFRULE_PFM
) ^ req_dev
)
2443 if (rule
->rule_flag
& PFRULE_PFM
)
2445 pf_delete_rule_from_ruleset(ruleset
, i
,
2447 pf_ruleset_cleanup(ruleset
, i
);
2454 * req_dev encodes the PF interface. Currently, possible values are
2458 pf_delete_rule_by_owner(char *owner
, u_int32_t req_dev
)
2460 struct pf_ruleset
*ruleset
;
2461 struct pf_rule
*rule
, *next
;
2464 for (int rs
= 0; rs
< PF_RULESET_MAX
; rs
++) {
2465 rule
= TAILQ_FIRST(pf_main_ruleset
.rules
[rs
].active
.ptr
);
2466 ruleset
= &pf_main_ruleset
;
2468 next
= TAILQ_NEXT(rule
, entries
);
2470 * process deleting rule only if device that added the
2471 * rule matches device that issued the request
2473 if ((rule
->rule_flag
& PFRULE_PFM
) ^ req_dev
) {
2478 if (((strcmp(rule
->owner
, owner
)) == 0) ||
2479 ((strcmp(rule
->owner
, "")) == 0)) {
2480 if (rule
->anchor
->ruleset
.rules
[rs
].active
.rcount
> 0) {
2482 pf_ruleset_cleanup(ruleset
, rs
);
2485 /* step into anchor */
2487 &rule
->anchor
->ruleset
;
2488 rule
= TAILQ_FIRST(ruleset
->rules
[rs
].active
.ptr
);
2491 if (rule
->rule_flag
&
2494 pf_delete_rule_from_ruleset(ruleset
, rs
, rule
);
2501 if (((strcmp(rule
->owner
, owner
)) == 0)) {
2503 if (rule
->rule_flag
& PFRULE_PFM
)
2505 pf_delete_rule_from_ruleset(ruleset
,
2513 pf_ruleset_cleanup(ruleset
, rs
);
2516 if (ruleset
!= &pf_main_ruleset
)
2517 pf_deleterule_anchor_step_out(&ruleset
,
2525 pf_deleterule_anchor_step_out(struct pf_ruleset
**ruleset_ptr
,
2526 int rs
, struct pf_rule
**rule_ptr
)
2528 struct pf_ruleset
*ruleset
= *ruleset_ptr
;
2529 struct pf_rule
*rule
= *rule_ptr
;
2531 /* step out of anchor */
2532 struct pf_ruleset
*rs_copy
= ruleset
;
2533 ruleset
= ruleset
->anchor
->parent
?
2534 &ruleset
->anchor
->parent
->ruleset
:&pf_main_ruleset
;
2536 rule
= TAILQ_FIRST(ruleset
->rules
[rs
].active
.ptr
);
2537 while (rule
&& (rule
->anchor
!= rs_copy
->anchor
))
2538 rule
= TAILQ_NEXT(rule
, entries
);
2540 panic("%s: parent rule of anchor not found!", __func__
);
2541 if (rule
->anchor
->ruleset
.rules
[rs
].active
.rcount
> 0)
2542 rule
= TAILQ_NEXT(rule
, entries
);
2544 *ruleset_ptr
= ruleset
;
2549 pf_addrwrap_setup(struct pf_addr_wrap
*aw
)
2552 bzero(&aw
->p
, sizeof aw
->p
);
2556 pf_rule_setup(struct pfioc_rule
*pr
, struct pf_rule
*rule
,
2557 struct pf_ruleset
*ruleset
) {
2558 struct pf_pooladdr
*apa
;
2561 if (rule
->ifname
[0]) {
2562 rule
->kif
= pfi_kif_get(rule
->ifname
);
2563 if (rule
->kif
== NULL
) {
2564 pool_put(&pf_rule_pl
, rule
);
2567 pfi_kif_ref(rule
->kif
, PFI_KIF_REF_RULE
);
2569 if (rule
->tagname
[0])
2570 if ((rule
->tag
= pf_tagname2tag(rule
->tagname
)) == 0)
2572 if (rule
->match_tagname
[0])
2573 if ((rule
->match_tag
=
2574 pf_tagname2tag(rule
->match_tagname
)) == 0)
2576 if (rule
->rt
&& !rule
->direction
)
2581 if (rule
->logif
>= PFLOGIFS_MAX
)
2584 pf_addrwrap_setup(&rule
->src
.addr
);
2585 pf_addrwrap_setup(&rule
->dst
.addr
);
2586 if (pf_rtlabel_add(&rule
->src
.addr
) ||
2587 pf_rtlabel_add(&rule
->dst
.addr
))
2589 if (pfi_dynaddr_setup(&rule
->src
.addr
, rule
->af
))
2591 if (pfi_dynaddr_setup(&rule
->dst
.addr
, rule
->af
))
2593 if (pf_tbladdr_setup(ruleset
, &rule
->src
.addr
))
2595 if (pf_tbladdr_setup(ruleset
, &rule
->dst
.addr
))
2597 if (pf_anchor_setup(rule
, ruleset
, pr
->anchor_call
))
2599 TAILQ_FOREACH(apa
, &pf_pabuf
, entries
)
2600 if (pf_tbladdr_setup(ruleset
, &apa
->addr
))
2603 if (rule
->overload_tblname
[0]) {
2604 if ((rule
->overload_tbl
= pfr_attach_table(ruleset
,
2605 rule
->overload_tblname
)) == NULL
)
2608 rule
->overload_tbl
->pfrkt_flags
|=
2612 pf_mv_pool(&pf_pabuf
, &rule
->rpool
.list
);
2614 if (((((rule
->action
== PF_NAT
) || (rule
->action
== PF_RDR
) ||
2615 (rule
->action
== PF_BINAT
) || (rule
->action
== PF_NAT64
)) &&
2616 rule
->anchor
== NULL
) ||
2617 (rule
->rt
> PF_FASTROUTE
)) &&
2618 (TAILQ_FIRST(&rule
->rpool
.list
) == NULL
))
2622 pf_rm_rule(NULL
, rule
);
2625 /* For a NAT64 rule the rule's address family is AF_INET6 whereas
2626 * the address pool's family will be AF_INET
2628 rule
->rpool
.af
= (rule
->action
== PF_NAT64
) ? AF_INET
: rule
->af
;
2629 rule
->rpool
.cur
= TAILQ_FIRST(&rule
->rpool
.list
);
2630 rule
->evaluations
= rule
->packets
[0] = rule
->packets
[1] =
2631 rule
->bytes
[0] = rule
->bytes
[1] = 0;
2637 pfioctl_ioc_rule(u_long cmd
, int minordev
, struct pfioc_rule
*pr
, struct proc
*p
)
2640 u_int32_t req_dev
= 0;
2644 struct pf_ruleset
*ruleset
;
2645 struct pf_rule
*rule
, *tail
;
2648 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
2649 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
2650 ruleset
= pf_find_ruleset(pr
->anchor
);
2651 if (ruleset
== NULL
) {
2655 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
2656 if (rs_num
>= PF_RULESET_MAX
) {
2660 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
2664 if (pr
->ticket
!= ruleset
->rules
[rs_num
].inactive
.ticket
) {
2668 if (pr
->pool_ticket
!= ticket_pabuf
) {
2672 rule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
2677 pf_rule_copyin(&pr
->rule
, rule
, p
, minordev
);
2679 if (rule
->af
== AF_INET
) {
2680 pool_put(&pf_rule_pl
, rule
);
2681 error
= EAFNOSUPPORT
;
2686 if (rule
->af
== AF_INET6
) {
2687 pool_put(&pf_rule_pl
, rule
);
2688 error
= EAFNOSUPPORT
;
2692 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
2695 rule
->nr
= tail
->nr
+ 1;
2699 if ((error
= pf_rule_setup(pr
, rule
, ruleset
)))
2702 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].inactive
.ptr
,
2704 ruleset
->rules
[rs_num
].inactive
.rcount
++;
2705 if (rule
->rule_flag
& PFRULE_PFM
)
2708 if (rule
->action
== PF_NAT64
)
2709 atomic_add_16(&pf_nat64_configured
, 1);
2711 if (pr
->anchor_call
[0] == '\0') {
2712 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_pf_addrule_total
);
2713 if (rule
->rule_flag
& PFRULE_PFM
) {
2714 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_pf_addrule_os
);
2719 if (rule
->action
== PF_DUMMYNET
) {
2720 struct dummynet_event dn_event
;
2721 uint32_t direction
= DN_INOUT
;;
2722 bzero(&dn_event
, sizeof(dn_event
));
2724 dn_event
.dn_event_code
= DUMMYNET_RULE_CONFIG
;
2726 if (rule
->direction
== PF_IN
)
2728 else if (rule
->direction
== PF_OUT
)
2731 dn_event
.dn_event_rule_config
.dir
= direction
;
2732 dn_event
.dn_event_rule_config
.af
= rule
->af
;
2733 dn_event
.dn_event_rule_config
.proto
= rule
->proto
;
2734 dn_event
.dn_event_rule_config
.src_port
= rule
->src
.xport
.range
.port
[0];
2735 dn_event
.dn_event_rule_config
.dst_port
= rule
->dst
.xport
.range
.port
[0];
2736 strlcpy(dn_event
.dn_event_rule_config
.ifname
, rule
->ifname
,
2737 sizeof(dn_event
.dn_event_rule_config
.ifname
));
2739 dummynet_event_enqueue_nwk_wq_entry(&dn_event
);
2745 case DIOCGETRULES
: {
2746 struct pf_ruleset
*ruleset
;
2747 struct pf_rule
*tail
;
2750 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
2751 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
2752 ruleset
= pf_find_ruleset(pr
->anchor
);
2753 if (ruleset
== NULL
) {
2757 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
2758 if (rs_num
>= PF_RULESET_MAX
) {
2762 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
2765 pr
->nr
= tail
->nr
+ 1;
2768 pr
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
2773 struct pf_ruleset
*ruleset
;
2774 struct pf_rule
*rule
;
2777 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
2778 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
2779 ruleset
= pf_find_ruleset(pr
->anchor
);
2780 if (ruleset
== NULL
) {
2784 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
2785 if (rs_num
>= PF_RULESET_MAX
) {
2789 if (pr
->ticket
!= ruleset
->rules
[rs_num
].active
.ticket
) {
2793 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
2794 while ((rule
!= NULL
) && (rule
->nr
!= pr
->nr
))
2795 rule
= TAILQ_NEXT(rule
, entries
);
2800 pf_rule_copyout(rule
, &pr
->rule
);
2801 if (pf_anchor_copyout(ruleset
, rule
, pr
)) {
2805 pfi_dynaddr_copyout(&pr
->rule
.src
.addr
);
2806 pfi_dynaddr_copyout(&pr
->rule
.dst
.addr
);
2807 pf_tbladdr_copyout(&pr
->rule
.src
.addr
);
2808 pf_tbladdr_copyout(&pr
->rule
.dst
.addr
);
2809 pf_rtlabel_copyout(&pr
->rule
.src
.addr
);
2810 pf_rtlabel_copyout(&pr
->rule
.dst
.addr
);
2811 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2812 if (rule
->skip
[i
].ptr
== NULL
)
2813 pr
->rule
.skip
[i
].nr
= -1;
2815 pr
->rule
.skip
[i
].nr
=
2816 rule
->skip
[i
].ptr
->nr
;
2818 if (pr
->action
== PF_GET_CLR_CNTR
) {
2819 rule
->evaluations
= 0;
2820 rule
->packets
[0] = rule
->packets
[1] = 0;
2821 rule
->bytes
[0] = rule
->bytes
[1] = 0;
2826 case DIOCCHANGERULE
: {
2827 struct pfioc_rule
*pcr
= pr
;
2828 struct pf_ruleset
*ruleset
;
2829 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
2830 struct pf_pooladdr
*pa
;
2834 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
2835 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
2836 pcr
->pool_ticket
!= ticket_pabuf
) {
2841 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
2842 pcr
->action
> PF_CHANGE_GET_TICKET
) {
2846 pcr
->anchor
[sizeof (pcr
->anchor
) - 1] = '\0';
2847 pcr
->anchor_call
[sizeof (pcr
->anchor_call
) - 1] = '\0';
2848 ruleset
= pf_find_ruleset(pcr
->anchor
);
2849 if (ruleset
== NULL
) {
2853 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
2854 if (rs_num
>= PF_RULESET_MAX
) {
2859 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
2860 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
2864 ruleset
->rules
[rs_num
].active
.ticket
) {
2868 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
2874 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
2875 newrule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
2876 if (newrule
== NULL
) {
2880 pf_rule_copyin(&pcr
->rule
, newrule
, p
, minordev
);
2882 if (newrule
->af
== AF_INET
) {
2883 pool_put(&pf_rule_pl
, newrule
);
2884 error
= EAFNOSUPPORT
;
2889 if (newrule
->af
== AF_INET6
) {
2890 pool_put(&pf_rule_pl
, newrule
);
2891 error
= EAFNOSUPPORT
;
2895 if (newrule
->ifname
[0]) {
2896 newrule
->kif
= pfi_kif_get(newrule
->ifname
);
2897 if (newrule
->kif
== NULL
) {
2898 pool_put(&pf_rule_pl
, newrule
);
2902 pfi_kif_ref(newrule
->kif
, PFI_KIF_REF_RULE
);
2904 newrule
->kif
= NULL
;
2906 if (newrule
->tagname
[0])
2908 pf_tagname2tag(newrule
->tagname
)) == 0)
2910 if (newrule
->match_tagname
[0])
2911 if ((newrule
->match_tag
= pf_tagname2tag(
2912 newrule
->match_tagname
)) == 0)
2914 if (newrule
->rt
&& !newrule
->direction
)
2919 if (newrule
->logif
>= PFLOGIFS_MAX
)
2922 pf_addrwrap_setup(&newrule
->src
.addr
);
2923 pf_addrwrap_setup(&newrule
->dst
.addr
);
2924 if (pf_rtlabel_add(&newrule
->src
.addr
) ||
2925 pf_rtlabel_add(&newrule
->dst
.addr
))
2927 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
2929 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
2931 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
2933 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
2935 if (pf_anchor_setup(newrule
, ruleset
, pcr
->anchor_call
))
2937 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
2938 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
2941 if (newrule
->overload_tblname
[0]) {
2942 if ((newrule
->overload_tbl
= pfr_attach_table(
2943 ruleset
, newrule
->overload_tblname
)) ==
2947 newrule
->overload_tbl
->pfrkt_flags
|=
2951 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
2952 if (((((newrule
->action
== PF_NAT
) ||
2953 (newrule
->action
== PF_RDR
) ||
2954 (newrule
->action
== PF_BINAT
) ||
2955 (newrule
->rt
> PF_FASTROUTE
)) &&
2956 !newrule
->anchor
)) &&
2957 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
2961 pf_rm_rule(NULL
, newrule
);
2964 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
2965 newrule
->evaluations
= 0;
2966 newrule
->packets
[0] = newrule
->packets
[1] = 0;
2967 newrule
->bytes
[0] = newrule
->bytes
[1] = 0;
2969 pf_empty_pool(&pf_pabuf
);
2971 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
2972 oldrule
= TAILQ_FIRST(
2973 ruleset
->rules
[rs_num
].active
.ptr
);
2974 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
2975 oldrule
= TAILQ_LAST(
2976 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
2978 oldrule
= TAILQ_FIRST(
2979 ruleset
->rules
[rs_num
].active
.ptr
);
2980 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
2981 oldrule
= TAILQ_NEXT(oldrule
, entries
);
2982 if (oldrule
== NULL
) {
2983 if (newrule
!= NULL
)
2984 pf_rm_rule(NULL
, newrule
);
2990 if (pcr
->action
== PF_CHANGE_REMOVE
) {
2991 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
2992 ruleset
->rules
[rs_num
].active
.rcount
--;
2994 if (oldrule
== NULL
)
2996 ruleset
->rules
[rs_num
].active
.ptr
,
2998 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
2999 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
3000 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
3003 ruleset
->rules
[rs_num
].active
.ptr
,
3004 oldrule
, newrule
, entries
);
3005 ruleset
->rules
[rs_num
].active
.rcount
++;
3009 TAILQ_FOREACH(oldrule
,
3010 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
3013 ruleset
->rules
[rs_num
].active
.ticket
++;
3015 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
3016 pf_remove_if_empty_ruleset(ruleset
);
3021 case DIOCINSERTRULE
: {
3022 struct pf_ruleset
*ruleset
;
3023 struct pf_rule
*rule
, *tail
, *r
;
3027 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3028 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3029 is_anchor
= (pr
->anchor_call
[0] != '\0');
3031 if ((ruleset
= pf_find_ruleset_with_owner(pr
->anchor
,
3032 pr
->rule
.owner
, is_anchor
, &error
)) == NULL
)
3035 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
3036 if (rs_num
>= PF_RULESET_MAX
) {
3040 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3045 /* make sure this anchor rule doesn't exist already */
3047 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
3050 ((strcmp(r
->anchor
->name
,
3051 pr
->anchor_call
)) == 0)) {
3052 if (((strcmp(pr
->rule
.owner
,
3054 ((strcmp(r
->owner
, "")) == 0))
3060 r
= TAILQ_NEXT(r
, entries
);
3066 rule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
3071 pf_rule_copyin(&pr
->rule
, rule
, p
, minordev
);
3073 if (rule
->af
== AF_INET
) {
3074 pool_put(&pf_rule_pl
, rule
);
3075 error
= EAFNOSUPPORT
;
3080 if (rule
->af
== AF_INET6
) {
3081 pool_put(&pf_rule_pl
, rule
);
3082 error
= EAFNOSUPPORT
;
3087 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
3088 while ((r
!= NULL
) && (rule
->priority
>= (unsigned)r
->priority
))
3089 r
= TAILQ_NEXT(r
, entries
);
3092 TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
3093 pf_rulequeue
)) != NULL
)
3094 rule
->nr
= tail
->nr
+ 1;
3101 if ((error
= pf_rule_setup(pr
, rule
, ruleset
)))
3104 if (rule
->anchor
!= NULL
)
3105 strlcpy(rule
->anchor
->owner
, rule
->owner
,
3106 PF_OWNER_NAME_SIZE
);
3109 TAILQ_INSERT_BEFORE(r
, rule
, entries
);
3110 while (r
&& ++r
->nr
)
3111 r
= TAILQ_NEXT(r
, entries
);
3113 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].active
.ptr
,
3115 ruleset
->rules
[rs_num
].active
.rcount
++;
3117 /* Calculate checksum for the main ruleset */
3118 if (ruleset
== &pf_main_ruleset
)
3119 error
= pf_setup_pfsync_matching(ruleset
);
3121 pf_ruleset_cleanup(ruleset
, rs_num
);
3122 rule
->ticket
= VM_KERNEL_ADDRPERM((u_int64_t
)(uintptr_t)rule
);
3124 pr
->rule
.ticket
= rule
->ticket
;
3125 pf_rule_copyout(rule
, &pr
->rule
);
3126 if (rule
->rule_flag
& PFRULE_PFM
)
3128 if (rule
->action
== PF_NAT64
)
3129 atomic_add_16(&pf_nat64_configured
, 1);
3131 if (pr
->anchor_call
[0] == '\0') {
3132 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_pf_addrule_total
);
3133 if (rule
->rule_flag
& PFRULE_PFM
) {
3134 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_pf_addrule_os
);
3140 case DIOCDELETERULE
: {
3141 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3142 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3144 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3149 /* get device through which request is made */
3150 if ((uint8_t)minordev
== PFDEV_PFM
)
3151 req_dev
|= PFRULE_PFM
;
3153 if (pr
->rule
.ticket
) {
3154 if ((error
= pf_delete_rule_by_ticket(pr
, req_dev
)))
3157 pf_delete_rule_by_owner(pr
->rule
.owner
, req_dev
);
3159 if (pr
->rule
.action
== PF_NAT64
)
3160 atomic_add_16(&pf_nat64_configured
, -1);
3173 pfioctl_ioc_state_kill(u_long cmd
, struct pfioc_state_kill
*psk
, struct proc
*p
)
3178 psk
->psk_ifname
[sizeof (psk
->psk_ifname
) - 1] = '\0';
3179 psk
->psk_ownername
[sizeof(psk
->psk_ownername
) - 1] = '\0';
3181 bool ifname_matched
= true;
3182 bool owner_matched
= true;
3185 case DIOCCLRSTATES
: {
3186 struct pf_state
*s
, *nexts
;
3189 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
; s
= nexts
) {
3190 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
3192 * Purge all states only when neither ifname
3193 * or owner is provided. If any of these are provided
3194 * we purge only the states with meta data that match
3196 bool unlink_state
= false;
3197 ifname_matched
= true;
3198 owner_matched
= true;
3200 if (psk
->psk_ifname
[0] &&
3201 strcmp(psk
->psk_ifname
, s
->kif
->pfik_name
)) {
3202 ifname_matched
= false;
3205 if (psk
->psk_ownername
[0] &&
3206 ((NULL
== s
->rule
.ptr
) ||
3207 strcmp(psk
->psk_ownername
, s
->rule
.ptr
->owner
))) {
3208 owner_matched
= false;
3211 unlink_state
= ifname_matched
&& owner_matched
;
3215 /* don't send out individual delete messages */
3216 s
->sync_flags
= PFSTATE_NOSYNC
;
3222 psk
->psk_af
= killed
;
3224 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
3229 case DIOCKILLSTATES
: {
3230 struct pf_state
*s
, *nexts
;
3231 struct pf_state_key
*sk
;
3232 struct pf_state_host
*src
, *dst
;
3235 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
;
3237 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
3239 ifname_matched
= true;
3240 owner_matched
= true;
3242 if (psk
->psk_ifname
[0] &&
3243 strcmp(psk
->psk_ifname
, s
->kif
->pfik_name
)) {
3244 ifname_matched
= false;
3247 if (psk
->psk_ownername
[0] &&
3248 ((NULL
== s
->rule
.ptr
) ||
3249 strcmp(psk
->psk_ownername
, s
->rule
.ptr
->owner
))) {
3250 owner_matched
= false;
3253 if (sk
->direction
== PF_OUT
) {
3260 if ((!psk
->psk_af
|| sk
->af_lan
== psk
->psk_af
) &&
3261 (!psk
->psk_proto
|| psk
->psk_proto
== sk
->proto
) &&
3262 PF_MATCHA(psk
->psk_src
.neg
,
3263 &psk
->psk_src
.addr
.v
.a
.addr
,
3264 &psk
->psk_src
.addr
.v
.a
.mask
,
3265 &src
->addr
, sk
->af_lan
) &&
3266 PF_MATCHA(psk
->psk_dst
.neg
,
3267 &psk
->psk_dst
.addr
.v
.a
.addr
,
3268 &psk
->psk_dst
.addr
.v
.a
.mask
,
3269 &dst
->addr
, sk
->af_lan
) &&
3270 (pf_match_xport(psk
->psk_proto
,
3271 psk
->psk_proto_variant
, &psk
->psk_src
.xport
,
3273 (pf_match_xport(psk
->psk_proto
,
3274 psk
->psk_proto_variant
, &psk
->psk_dst
.xport
,
3279 /* send immediate delete of state */
3280 pfsync_delete_state(s
);
3281 s
->sync_flags
|= PFSTATE_NOSYNC
;
3287 psk
->psk_af
= killed
;
3300 pfioctl_ioc_state(u_long cmd
, struct pfioc_state
*ps
, struct proc
*p
)
3306 case DIOCADDSTATE
: {
3307 struct pfsync_state
*sp
= &ps
->state
;
3309 struct pf_state_key
*sk
;
3310 struct pfi_kif
*kif
;
3312 if (sp
->timeout
>= PFTM_MAX
) {
3316 s
= pool_get(&pf_state_pl
, PR_WAITOK
);
3321 bzero(s
, sizeof (struct pf_state
));
3322 if ((sk
= pf_alloc_state_key(s
, NULL
)) == NULL
) {
3323 pool_put(&pf_state_pl
, s
);
3327 pf_state_import(sp
, sk
, s
);
3328 kif
= pfi_kif_get(sp
->ifname
);
3330 pool_put(&pf_state_pl
, s
);
3331 pool_put(&pf_state_key_pl
, sk
);
3335 TAILQ_INIT(&s
->unlink_hooks
);
3336 s
->state_key
->app_state
= 0;
3337 if (pf_insert_state(kif
, s
)) {
3338 pfi_kif_unref(kif
, PFI_KIF_REF_NONE
);
3339 pool_put(&pf_state_pl
, s
);
3343 pf_default_rule
.states
++;
3344 VERIFY(pf_default_rule
.states
!= 0);
3348 case DIOCGETSTATE
: {
3350 struct pf_state_cmp id_key
;
3352 bcopy(ps
->state
.id
, &id_key
.id
, sizeof (id_key
.id
));
3353 id_key
.creatorid
= ps
->state
.creatorid
;
3355 s
= pf_find_state_byid(&id_key
);
3361 pf_state_export(&ps
->state
, s
->state_key
, s
);
3374 pfioctl_ioc_states(u_long cmd
, struct pfioc_states_32
*ps32
,
3375 struct pfioc_states_64
*ps64
, struct proc
*p
)
3377 int p64
= proc_is64bit(p
);
3381 case DIOCGETSTATES
: { /* struct pfioc_states */
3382 struct pf_state
*state
;
3383 struct pfsync_state
*pstore
;
3388 len
= (p64
? ps64
->ps_len
: ps32
->ps_len
);
3390 size
= sizeof (struct pfsync_state
) * pf_status
.states
;
3392 ps64
->ps_len
= size
;
3394 ps32
->ps_len
= size
;
3398 pstore
= _MALLOC(sizeof (*pstore
), M_TEMP
, M_WAITOK
);
3399 if (pstore
== NULL
) {
3403 buf
= (p64
? ps64
->ps_buf
: ps32
->ps_buf
);
3405 state
= TAILQ_FIRST(&state_list
);
3407 if (state
->timeout
!= PFTM_UNLINKED
) {
3408 if ((nr
+ 1) * sizeof (*pstore
) > (unsigned)len
)
3411 pf_state_export(pstore
,
3412 state
->state_key
, state
);
3413 error
= copyout(pstore
, buf
, sizeof (*pstore
));
3415 _FREE(pstore
, M_TEMP
);
3418 buf
+= sizeof (*pstore
);
3421 state
= TAILQ_NEXT(state
, entry_list
);
3424 size
= sizeof (struct pfsync_state
) * nr
;
3426 ps64
->ps_len
= size
;
3428 ps32
->ps_len
= size
;
3430 _FREE(pstore
, M_TEMP
);
3443 pfioctl_ioc_natlook(u_long cmd
, struct pfioc_natlook
*pnl
, struct proc
*p
)
3450 struct pf_state_key
*sk
;
3451 struct pf_state
*state
;
3452 struct pf_state_key_cmp key
;
3453 int m
= 0, direction
= pnl
->direction
;
3455 key
.proto
= pnl
->proto
;
3456 key
.proto_variant
= pnl
->proto_variant
;
3459 PF_AZERO(&pnl
->saddr
, pnl
->af
) ||
3460 PF_AZERO(&pnl
->daddr
, pnl
->af
) ||
3461 ((pnl
->proto
== IPPROTO_TCP
||
3462 pnl
->proto
== IPPROTO_UDP
) &&
3463 (!pnl
->dxport
.port
|| !pnl
->sxport
.port
)))
3467 * userland gives us source and dest of connection,
3468 * reverse the lookup so we ask for what happens with
3469 * the return traffic, enabling us to find it in the
3472 if (direction
== PF_IN
) {
3473 key
.af_gwy
= pnl
->af
;
3474 PF_ACPY(&key
.ext_gwy
.addr
, &pnl
->daddr
,
3476 memcpy(&key
.ext_gwy
.xport
, &pnl
->dxport
,
3477 sizeof (key
.ext_gwy
.xport
));
3478 PF_ACPY(&key
.gwy
.addr
, &pnl
->saddr
, pnl
->af
);
3479 memcpy(&key
.gwy
.xport
, &pnl
->sxport
,
3480 sizeof (key
.gwy
.xport
));
3481 state
= pf_find_state_all(&key
, PF_IN
, &m
);
3483 key
.af_lan
= pnl
->af
;
3484 PF_ACPY(&key
.lan
.addr
, &pnl
->daddr
, pnl
->af
);
3485 memcpy(&key
.lan
.xport
, &pnl
->dxport
,
3486 sizeof (key
.lan
.xport
));
3487 PF_ACPY(&key
.ext_lan
.addr
, &pnl
->saddr
,
3489 memcpy(&key
.ext_lan
.xport
, &pnl
->sxport
,
3490 sizeof (key
.ext_lan
.xport
));
3491 state
= pf_find_state_all(&key
, PF_OUT
, &m
);
3494 error
= E2BIG
; /* more than one state */
3495 else if (state
!= NULL
) {
3496 sk
= state
->state_key
;
3497 if (direction
== PF_IN
) {
3498 PF_ACPY(&pnl
->rsaddr
, &sk
->lan
.addr
,
3500 memcpy(&pnl
->rsxport
, &sk
->lan
.xport
,
3501 sizeof (pnl
->rsxport
));
3502 PF_ACPY(&pnl
->rdaddr
, &pnl
->daddr
,
3504 memcpy(&pnl
->rdxport
, &pnl
->dxport
,
3505 sizeof (pnl
->rdxport
));
3507 PF_ACPY(&pnl
->rdaddr
, &sk
->gwy
.addr
,
3509 memcpy(&pnl
->rdxport
, &sk
->gwy
.xport
,
3510 sizeof (pnl
->rdxport
));
3511 PF_ACPY(&pnl
->rsaddr
, &pnl
->saddr
,
3513 memcpy(&pnl
->rsxport
, &pnl
->sxport
,
3514 sizeof (pnl
->rsxport
));
3531 pfioctl_ioc_tm(u_long cmd
, struct pfioc_tm
*pt
, struct proc
*p
)
3537 case DIOCSETTIMEOUT
: {
3540 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
||
3545 old
= pf_default_rule
.timeout
[pt
->timeout
];
3546 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
== 0)
3548 pf_default_rule
.timeout
[pt
->timeout
] = pt
->seconds
;
3549 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
< old
)
3550 wakeup(pf_purge_thread_fn
);
3555 case DIOCGETTIMEOUT
: {
3556 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
) {
3560 pt
->seconds
= pf_default_rule
.timeout
[pt
->timeout
];
3573 pfioctl_ioc_limit(u_long cmd
, struct pfioc_limit
*pl
, struct proc
*p
)
3579 case DIOCGETLIMIT
: {
3581 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
) {
3585 pl
->limit
= pf_pool_limits
[pl
->index
].limit
;
3589 case DIOCSETLIMIT
: {
3592 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
||
3593 pf_pool_limits
[pl
->index
].pp
== NULL
) {
3597 pool_sethardlimit(pf_pool_limits
[pl
->index
].pp
,
3598 pl
->limit
, NULL
, 0);
3599 old_limit
= pf_pool_limits
[pl
->index
].limit
;
3600 pf_pool_limits
[pl
->index
].limit
= pl
->limit
;
3601 pl
->limit
= old_limit
;
3614 pfioctl_ioc_pooladdr(u_long cmd
, struct pfioc_pooladdr
*pp
, struct proc
*p
)
3617 struct pf_pooladdr
*pa
= NULL
;
3618 struct pf_pool
*pool
= NULL
;
3622 case DIOCBEGINADDRS
: {
3623 pf_empty_pool(&pf_pabuf
);
3624 pp
->ticket
= ++ticket_pabuf
;
3629 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
3630 if (pp
->ticket
!= ticket_pabuf
) {
3635 if (pp
->af
== AF_INET
) {
3636 error
= EAFNOSUPPORT
;
3641 if (pp
->af
== AF_INET6
) {
3642 error
= EAFNOSUPPORT
;
3646 if (pp
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
3647 pp
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
3648 pp
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
3652 pa
= pool_get(&pf_pooladdr_pl
, PR_WAITOK
);
3657 pf_pooladdr_copyin(&pp
->addr
, pa
);
3658 if (pa
->ifname
[0]) {
3659 pa
->kif
= pfi_kif_get(pa
->ifname
);
3660 if (pa
->kif
== NULL
) {
3661 pool_put(&pf_pooladdr_pl
, pa
);
3665 pfi_kif_ref(pa
->kif
, PFI_KIF_REF_RULE
);
3667 pf_addrwrap_setup(&pa
->addr
);
3668 if (pfi_dynaddr_setup(&pa
->addr
, pp
->af
)) {
3669 pfi_dynaddr_remove(&pa
->addr
);
3670 pfi_kif_unref(pa
->kif
, PFI_KIF_REF_RULE
);
3671 pool_put(&pf_pooladdr_pl
, pa
);
3675 TAILQ_INSERT_TAIL(&pf_pabuf
, pa
, entries
);
3679 case DIOCGETADDRS
: {
3681 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
3682 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
3683 pp
->r_num
, 0, 1, 0);
3688 TAILQ_FOREACH(pa
, &pool
->list
, entries
)
3696 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
3697 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
3698 pp
->r_num
, 0, 1, 1);
3703 pa
= TAILQ_FIRST(&pool
->list
);
3704 while ((pa
!= NULL
) && (nr
< pp
->nr
)) {
3705 pa
= TAILQ_NEXT(pa
, entries
);
3712 pf_pooladdr_copyout(pa
, &pp
->addr
);
3713 pfi_dynaddr_copyout(&pp
->addr
.addr
);
3714 pf_tbladdr_copyout(&pp
->addr
.addr
);
3715 pf_rtlabel_copyout(&pp
->addr
.addr
);
3719 case DIOCCHANGEADDR
: {
3720 struct pfioc_pooladdr
*pca
= pp
;
3721 struct pf_pooladdr
*oldpa
= NULL
, *newpa
= NULL
;
3722 struct pf_ruleset
*ruleset
;
3724 if (pca
->action
< PF_CHANGE_ADD_HEAD
||
3725 pca
->action
> PF_CHANGE_REMOVE
) {
3729 if (pca
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
3730 pca
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
3731 pca
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
3736 pca
->anchor
[sizeof (pca
->anchor
) - 1] = '\0';
3737 ruleset
= pf_find_ruleset(pca
->anchor
);
3738 if (ruleset
== NULL
) {
3742 pool
= pf_get_pool(pca
->anchor
, pca
->ticket
, pca
->r_action
,
3743 pca
->r_num
, pca
->r_last
, 1, 1);
3748 if (pca
->action
!= PF_CHANGE_REMOVE
) {
3749 newpa
= pool_get(&pf_pooladdr_pl
, PR_WAITOK
);
3750 if (newpa
== NULL
) {
3754 pf_pooladdr_copyin(&pca
->addr
, newpa
);
3756 if (pca
->af
== AF_INET
) {
3757 pool_put(&pf_pooladdr_pl
, newpa
);
3758 error
= EAFNOSUPPORT
;
3763 if (pca
->af
== AF_INET6
) {
3764 pool_put(&pf_pooladdr_pl
, newpa
);
3765 error
= EAFNOSUPPORT
;
3769 if (newpa
->ifname
[0]) {
3770 newpa
->kif
= pfi_kif_get(newpa
->ifname
);
3771 if (newpa
->kif
== NULL
) {
3772 pool_put(&pf_pooladdr_pl
, newpa
);
3776 pfi_kif_ref(newpa
->kif
, PFI_KIF_REF_RULE
);
3779 pf_addrwrap_setup(&newpa
->addr
);
3780 if (pfi_dynaddr_setup(&newpa
->addr
, pca
->af
) ||
3781 pf_tbladdr_setup(ruleset
, &newpa
->addr
)) {
3782 pfi_dynaddr_remove(&newpa
->addr
);
3783 pfi_kif_unref(newpa
->kif
, PFI_KIF_REF_RULE
);
3784 pool_put(&pf_pooladdr_pl
, newpa
);
3790 if (pca
->action
== PF_CHANGE_ADD_HEAD
)
3791 oldpa
= TAILQ_FIRST(&pool
->list
);
3792 else if (pca
->action
== PF_CHANGE_ADD_TAIL
)
3793 oldpa
= TAILQ_LAST(&pool
->list
, pf_palist
);
3797 oldpa
= TAILQ_FIRST(&pool
->list
);
3798 while ((oldpa
!= NULL
) && (i
< (int)pca
->nr
)) {
3799 oldpa
= TAILQ_NEXT(oldpa
, entries
);
3802 if (oldpa
== NULL
) {
3808 if (pca
->action
== PF_CHANGE_REMOVE
) {
3809 TAILQ_REMOVE(&pool
->list
, oldpa
, entries
);
3810 pfi_dynaddr_remove(&oldpa
->addr
);
3811 pf_tbladdr_remove(&oldpa
->addr
);
3812 pfi_kif_unref(oldpa
->kif
, PFI_KIF_REF_RULE
);
3813 pool_put(&pf_pooladdr_pl
, oldpa
);
3816 TAILQ_INSERT_TAIL(&pool
->list
, newpa
, entries
);
3817 else if (pca
->action
== PF_CHANGE_ADD_HEAD
||
3818 pca
->action
== PF_CHANGE_ADD_BEFORE
)
3819 TAILQ_INSERT_BEFORE(oldpa
, newpa
, entries
);
3821 TAILQ_INSERT_AFTER(&pool
->list
, oldpa
,
3825 pool
->cur
= TAILQ_FIRST(&pool
->list
);
3826 PF_ACPY(&pool
->counter
, &pool
->cur
->addr
.v
.a
.addr
,
3840 pfioctl_ioc_ruleset(u_long cmd
, struct pfioc_ruleset
*pr
, struct proc
*p
)
3846 case DIOCGETRULESETS
: {
3847 struct pf_ruleset
*ruleset
;
3848 struct pf_anchor
*anchor
;
3850 pr
->path
[sizeof (pr
->path
) - 1] = '\0';
3851 pr
->name
[sizeof (pr
->name
) - 1] = '\0';
3852 if ((ruleset
= pf_find_ruleset(pr
->path
)) == NULL
) {
3857 if (ruleset
->anchor
== NULL
) {
3858 /* XXX kludge for pf_main_ruleset */
3859 RB_FOREACH(anchor
, pf_anchor_global
, &pf_anchors
)
3860 if (anchor
->parent
== NULL
)
3863 RB_FOREACH(anchor
, pf_anchor_node
,
3864 &ruleset
->anchor
->children
)
3870 case DIOCGETRULESET
: {
3871 struct pf_ruleset
*ruleset
;
3872 struct pf_anchor
*anchor
;
3875 pr
->path
[sizeof (pr
->path
) - 1] = '\0';
3876 if ((ruleset
= pf_find_ruleset(pr
->path
)) == NULL
) {
3881 if (ruleset
->anchor
== NULL
) {
3882 /* XXX kludge for pf_main_ruleset */
3883 RB_FOREACH(anchor
, pf_anchor_global
, &pf_anchors
)
3884 if (anchor
->parent
== NULL
&& nr
++ == pr
->nr
) {
3885 strlcpy(pr
->name
, anchor
->name
,
3890 RB_FOREACH(anchor
, pf_anchor_node
,
3891 &ruleset
->anchor
->children
)
3892 if (nr
++ == pr
->nr
) {
3893 strlcpy(pr
->name
, anchor
->name
,
3912 pfioctl_ioc_trans(u_long cmd
, struct pfioc_trans_32
*io32
,
3913 struct pfioc_trans_64
*io64
, struct proc
*p
)
3915 int p64
= proc_is64bit(p
);
3916 int error
= 0, esize
, size
;
3919 esize
= (p64
? io64
->esize
: io32
->esize
);
3920 size
= (p64
? io64
->size
: io32
->size
);
3921 buf
= (p64
? io64
->array
: io32
->array
);
3925 struct pfioc_trans_e
*ioe
;
3926 struct pfr_table
*table
;
3929 if (esize
!= sizeof (*ioe
)) {
3933 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
3934 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
3935 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
3936 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
3937 _FREE(table
, M_TEMP
);
3942 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
3943 switch (ioe
->rs_num
) {
3944 case PF_RULESET_ALTQ
:
3946 case PF_RULESET_TABLE
:
3947 bzero(table
, sizeof (*table
));
3948 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
3949 sizeof (table
->pfrt_anchor
));
3950 if ((error
= pfr_ina_begin(table
,
3951 &ioe
->ticket
, NULL
, 0))) {
3952 _FREE(table
, M_TEMP
);
3958 if ((error
= pf_begin_rules(&ioe
->ticket
,
3959 ioe
->rs_num
, ioe
->anchor
))) {
3960 _FREE(table
, M_TEMP
);
3966 if (copyout(ioe
, buf
, sizeof (*ioe
))) {
3967 _FREE(table
, M_TEMP
);
3973 _FREE(table
, M_TEMP
);
3978 case DIOCXROLLBACK
: {
3979 struct pfioc_trans_e
*ioe
;
3980 struct pfr_table
*table
;
3983 if (esize
!= sizeof (*ioe
)) {
3987 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
3988 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
3989 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
3990 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
3991 _FREE(table
, M_TEMP
);
3996 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
3997 switch (ioe
->rs_num
) {
3998 case PF_RULESET_ALTQ
:
4000 case PF_RULESET_TABLE
:
4001 bzero(table
, sizeof (*table
));
4002 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
4003 sizeof (table
->pfrt_anchor
));
4004 if ((error
= pfr_ina_rollback(table
,
4005 ioe
->ticket
, NULL
, 0))) {
4006 _FREE(table
, M_TEMP
);
4008 goto fail
; /* really bad */
4012 if ((error
= pf_rollback_rules(ioe
->ticket
,
4013 ioe
->rs_num
, ioe
->anchor
))) {
4014 _FREE(table
, M_TEMP
);
4016 goto fail
; /* really bad */
4021 _FREE(table
, M_TEMP
);
4027 struct pfioc_trans_e
*ioe
;
4028 struct pfr_table
*table
;
4029 struct pf_ruleset
*rs
;
4030 user_addr_t _buf
= buf
;
4033 if (esize
!= sizeof (*ioe
)) {
4037 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
4038 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
4039 /* first makes sure everything will succeed */
4040 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4041 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4042 _FREE(table
, M_TEMP
);
4047 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4048 switch (ioe
->rs_num
) {
4049 case PF_RULESET_ALTQ
:
4051 case PF_RULESET_TABLE
:
4052 rs
= pf_find_ruleset(ioe
->anchor
);
4053 if (rs
== NULL
|| !rs
->topen
|| ioe
->ticket
!=
4055 _FREE(table
, M_TEMP
);
4062 if (ioe
->rs_num
< 0 || ioe
->rs_num
>=
4064 _FREE(table
, M_TEMP
);
4069 rs
= pf_find_ruleset(ioe
->anchor
);
4071 !rs
->rules
[ioe
->rs_num
].inactive
.open
||
4072 rs
->rules
[ioe
->rs_num
].inactive
.ticket
!=
4074 _FREE(table
, M_TEMP
);
4083 /* now do the commit - no errors should happen here */
4084 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4085 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4086 _FREE(table
, M_TEMP
);
4091 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4092 switch (ioe
->rs_num
) {
4093 case PF_RULESET_ALTQ
:
4095 case PF_RULESET_TABLE
:
4096 bzero(table
, sizeof (*table
));
4097 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
4098 sizeof (table
->pfrt_anchor
));
4099 if ((error
= pfr_ina_commit(table
, ioe
->ticket
,
4101 _FREE(table
, M_TEMP
);
4103 goto fail
; /* really bad */
4107 if ((error
= pf_commit_rules(ioe
->ticket
,
4108 ioe
->rs_num
, ioe
->anchor
))) {
4109 _FREE(table
, M_TEMP
);
4111 goto fail
; /* really bad */
4116 _FREE(table
, M_TEMP
);
/*
 * Handler for the DIOCGETSRCNODES ioctl: copies the pf source-tracking
 * nodes (tree_src_tracking) out to user space, picking the 32-bit or
 * 64-bit ioctl structure layout based on proc_is64bit(p).
 *
 * NOTE(review): this extraction is damaged -- statements are split across
 * lines, the upstream file's line numbers (e.g. "4143") are fused into the
 * text, and some lines (braces, error paths, loop bodies) are missing.
 * Compare against upstream pf_ioctl.c before relying on any statement.
 */
4130 pfioctl_ioc_src_nodes(u_long cmd
, struct pfioc_src_nodes_32
*psn32
,
4131 struct pfioc_src_nodes_64
*psn64
, struct proc
*p
)
/* p64 selects between the psn32/psn64 views of the request */
4133 int p64
= proc_is64bit(p
);
4137 case DIOCGETSRCNODES
: {
4138 struct pf_src_node
*n
, *pstore
;
/* caller-supplied buffer length, from the matching 32/64-bit struct */
4143 space
= (p64
? psn64
->psn_len
)
: psn32
->psn_len
);
/* first pass over the tree -- presumably counts nodes into nr; the
 * loop body is missing from this extraction (TODO confirm upstream) */
4145 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
)
4148 size
= sizeof (struct pf_src_node
) * nr
;
/* report the required length back through whichever struct applies */
4150 psn64
->psn_len
= size
;
4152 psn32
->psn_len
= size
;
/* scratch copy of a single node; M_WAITOK means _MALLOC may block */
4156 pstore
= _MALLOC(sizeof (*pstore
), M_TEMP
, M_WAITOK
);
4157 if (pstore
== NULL
) {
4161 buf
= (p64
? psn64
->psn_buf
: psn32
->psn_buf
);
/* second pass: sanitize a copy of each node and copyout() it */
4163 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
4164 uint64_t secs
= pf_time_second(), diff
;
/* stop when the next entry would overflow the user buffer */
4166 if ((nr
+ 1) * sizeof (*pstore
) > (unsigned)space
)
/* work on a private copy so the live node is never modified */
4169 bcopy(n
, pstore
, sizeof (*pstore
));
/* export the rule number instead of the kernel rule pointer */
4170 if (n
->rule
.ptr
!= NULL
)
4171 pstore
->rule
.nr
= n
->rule
.ptr
->nr
;
/* convert absolute timestamps to ages relative to "now" (secs) */
4172 pstore
->creation
= secs
- pstore
->creation
;
4173 if (pstore
->expire
> secs
)
4174 pstore
->expire
-= secs
;
4178 /* adjust the connection rate estimate */
4179 diff
= secs
- n
->conn_rate
.last
;
4180 if (diff
>= n
->conn_rate
.seconds
)
4181 pstore
->conn_rate
.count
= 0;
4183 pstore
->conn_rate
.count
-=
4184 n
->conn_rate
.count
* diff
/
4185 n
->conn_rate
.seconds
;
/* scrub kernel RB-tree linkage before exposing the struct */
4187 _RB_PARENT(pstore
, entry
) = NULL
;
4188 RB_LEFT(pstore
, entry
) = RB_RIGHT(pstore
, entry
) = NULL
;
4191 error
= copyout(pstore
, buf
, sizeof (*pstore
));
/* error-handling line(s) appear to be missing here in this extraction */
4193 _FREE(pstore
, M_TEMP
);
4196 buf
+= sizeof (*pstore
);
/* report how much was actually written */
4200 size
= sizeof (struct pf_src_node
) * nr
;
4202 psn64
->psn_len
= size
;
4204 psn32
->psn_len
= size
;
4206 _FREE(pstore
, M_TEMP
);
/*
 * Handler for the DIOCKILLSRCNODES ioctl: walks the source-tracking tree
 * and marks for removal every node whose source and translated addresses
 * match the psnk_src/psnk_dst address+mask filters (PF_MATCHA honors the
 * per-filter negation flag).  Matching nodes are first unlinked from any
 * states referencing them, then reaped via pf_purge_expired_src_nodes().
 *
 * NOTE(review): damaged extraction -- lines are split and some are
 * missing (e.g. the expire-marking and killed-counter statements);
 * verify against upstream pf_ioctl.c.
 */
4220 pfioctl_ioc_src_node_kill(u_long cmd
, struct pfioc_src_node_kill
*psnk
,
4227 case DIOCKILLSRCNODES
: {
4228 struct pf_src_node
*sn
;
4232 RB_FOREACH(sn
, pf_src_tree
, &tree_src_tracking
) {
/* match the node's source address against the src filter ... */
4233 if (PF_MATCHA(psnk
->psnk_src
.neg
,
4234 &psnk
->psnk_src
.addr
.v
.a
.addr
,
4235 &psnk
->psnk_src
.addr
.v
.a
.mask
,
4236 &sn
->addr
, sn
->af
) &&
/* ... and its redirect/translated address against the dst filter */
4237 PF_MATCHA(psnk
->psnk_dst
.neg
,
4238 &psnk
->psnk_dst
.addr
.v
.a
.addr
,
4239 &psnk
->psnk_dst
.addr
.v
.a
.mask
,
4240 &sn
->raddr
, sn
->af
)) {
4241 /* Handle state to src_node linkage */
4242 if (sn
->states
!= 0) {
/* detach every state still pointing at this node */
4243 RB_FOREACH(s
, pf_state_tree_id
,
4245 if (s
->src_node
== sn
)
4247 if (s
->nat_src_node
== sn
)
4248 s
->nat_src_node
= NULL
;
/* reap the nodes that were just marked for removal */
4258 pf_purge_expired_src_nodes();
/* psnk_af is reused as an output field to return the kill count */
4260 psnk
->psnk_af
= killed
;
/*
 * Handler for the pf interface ioctls (DIOCIGETIFACES, DIOCSETIFFLAG,
 * DIOCCLRIFFLAG).  Each command reads its arguments from the 32-bit or
 * 64-bit ioctl structure depending on proc_is64bit(p), NUL-terminates
 * the user-supplied interface name, and dispatches to the pfi_* layer.
 *
 * NOTE(review): damaged extraction -- statements split across lines and
 * some lines missing; verify against upstream pf_ioctl.c.
 */
4273 pfioctl_ioc_iface(u_long cmd
, struct pfioc_iface_32
*io32
,
4274 struct pfioc_iface_64
*io64
, struct proc
*p
)
4276 int p64
= proc_is64bit(p
);
4280 case DIOCIGETIFACES
: {
/* user buffer pointer and element size, from the matching struct */
4284 buf
= (p64
? io64
->pfiio_buffer
: io32
->pfiio_buffer
);
4285 esize
= (p64
? io64
->pfiio_esize
: io32
->pfiio_esize
);
4287 /* esize must be that of the user space version of pfi_kif */
4288 if (esize
!= sizeof (struct pfi_uif
)) {
/* force NUL-termination of the (possibly unterminated) user name */
4293 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4295 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4296 error
= pfi_get_ifaces(
4297 p64
? io64
->pfiio_name
: io32
->pfiio_name
, buf
,
/* pfiio_size is in/out: passed by address so the count comes back */
4298 p64
? &io64
->pfiio_size
: &io32
->pfiio_size
);
4302 case DIOCSETIFFLAG
: {
4304 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4306 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4308 error
= pfi_set_flags(
4309 p64
? io64
->pfiio_name
: io32
->pfiio_name
,
4310 p64
? io64
->pfiio_flags
: io32
->pfiio_flags
);
4314 case DIOCCLRIFFLAG
: {
4316 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4318 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4320 error
= pfi_clear_flags(
4321 p64
? io64
->pfiio_name
: io32
->pfiio_name
,
4322 p64
? io64
->pfiio_flags
: io32
->pfiio_flags
);
/*
 * Per-packet entry point into PF from the IP input/output paths.
 * Detaches the lead packet from its m_nextpkt chain, runs it through
 * pf_inet_hook()/pf_inet6_hook() under the PF locks, then relinks the
 * chain around the (possibly consumed) packet.
 *
 * Locking: pushes a NET_THREAD_HELD_PF thread mark; when the mark is
 * taken, holds pf_perim_lock (shared) and pf_lock (mutex) around the
 * filter call, releasing both before popping the mark.
 *
 * NOTE(review): damaged extraction -- lines split and some missing
 * (e.g. the PKTF_LOOP body and the chain fix-up); verify upstream.
 */
4335 pf_af_hook(struct ifnet
*ifp
, struct mbuf
**mppn
, struct mbuf
**mp
,
4336 unsigned int af
, int input
, struct ip_fw_args
*fwa
)
4339 struct mbuf
*nextpkt
;
4340 net_thread_marks_t marks
;
/* may be overridden below for looped-back packets (see PKTF_LOOP) */
4341 struct ifnet
* pf_ifp
= ifp
;
4343 /* Always allow traffic on co-processor interfaces. */
4344 if (!intcoproc_unrestricted
&& ifp
&& IFNET_IS_INTCOPROC(ifp
))
4347 marks
= net_thread_marks_push(NET_THREAD_HELD_PF
);
/* only take the PF locks when this thread newly acquired the mark */
4349 if (marks
!= net_thread_marks_none
) {
4350 lck_rw_lock_shared(pf_perim_lock
);
4353 lck_mtx_lock(pf_lock
);
/* if a back-pointer into the chain was given, it must name *mp */
4356 if (mppn
!= NULL
&& *mppn
!= NULL
)
4357 VERIFY(*mppn
== *mp
);
/* detach the lead packet so PF sees exactly one packet */
4358 if ((nextpkt
= (*mp
)->m_nextpkt
) != NULL
)
4359 (*mp
)->m_nextpkt
= NULL
;
4362 * For packets destined to locally hosted IP address
4363 * ip_output_list sets Mbuf's pkt header's rcvif to
4364 * the interface hosting the IP address.
4365 * While on the output path ifp passed to pf_af_hook
4366 * to such local communication is the loopback interface,
4367 * the input path derives ifp from mbuf packet header's
4369 * This asymmetry causes issues with PF.
4370 * To handle that case, we have a limited change here to
4371 * pass interface as loopback if packets are looped in.
4373 if (input
&& ((*mp
)->m_pkthdr
.pkt_flags
& PKTF_LOOP
)) {
/* dispatch by address family (the AF_INET/AF_INET6 switch labels
 * are missing from this extraction) */
4380 error
= pf_inet_hook(pf_ifp
, mp
, input
, fwa
);
4386 error
= pf_inet6_hook(pf_ifp
, mp
, input
, fwa
);
4393 /* When packet valid, link to the next packet */
4394 if (*mp
!= NULL
&& nextpkt
!= NULL
) {
/* walk to the end of whatever chain PF left us, then reattach */
4395 struct mbuf
*m
= *mp
;
4396 while (m
->m_nextpkt
!= NULL
)
4398 m
->m_nextpkt
= nextpkt
;
4400 /* Fix up linkage of previous packet in the chain */
/* release in reverse acquisition order: mutex, then rwlock */
4408 if (marks
!= net_thread_marks_none
)
4409 lck_mtx_unlock(pf_lock
);
4412 if (marks
!= net_thread_marks_none
)
4413 lck_rw_done(pf_perim_lock
);
4415 net_thread_marks_pop(marks
);
/*
 * IPv4 leg of the PF hook: finalizes any delayed transport checksum for
 * outbound, locally-originated packets when the interface lacks hardware
 * offload, then runs the packet through pf_test_mbuf().  A verdict other
 * than PF_PASS yields EHOSTUNREACH.
 *
 * NOTE(review): damaged extraction -- the #if BYTE_ORDER blocks are
 * missing their byte-swap bodies and #endif lines; verify upstream.
 */
4422 pf_inet_hook(struct ifnet
*ifp
, struct mbuf
**mp
, int input
,
4423 struct ip_fw_args
*fwa
)
4425 struct mbuf
*m
= *mp
;
/* header pointer only needed for the host/network byte-order swaps */
4426 #if BYTE_ORDER != BIG_ENDIAN
4427 struct ip
*ip
= mtod(m
, struct ip
*);
4432 * If the packet is outbound, is originated locally, is flagged for
4433 * delayed UDP/TCP checksum calculation, and is about to be processed
4434 * for an interface that doesn't support the appropriate checksum
4435 * offloading, then calculate the checksum here so that PF can adjust
/* rcvif == NULL marks a locally-originated outbound packet */
4438 if (!input
&& m
->m_pkthdr
.rcvif
== NULL
) {
4439 static const int mask
= CSUM_DELAY_DATA
;
/* checksum work still pending that the NIC cannot do itself */
4440 const int flags
= m
->m_pkthdr
.csum_flags
&
4441 ~IF_HWASSIST_CSUM_FLAGS(ifp
->if_hwassist
);
4444 in_delayed_cksum(m
);
4445 m
->m_pkthdr
.csum_flags
&= ~mask
;
4449 #if BYTE_ORDER != BIG_ENDIAN
4453 if (pf_test_mbuf(input
? PF_IN
: PF_OUT
, ifp
, mp
, NULL
, fwa
) != PF_PASS
) {
4457 error
= EHOSTUNREACH
;
/* PF may have replaced the mbuf; refetch the header pointer */
4462 #if BYTE_ORDER != BIG_ENDIAN
4465 ip
= mtod(*mp
, struct ip
*);
/*
 * IPv6 leg of the PF hook: mirrors pf_inet_hook() -- finalizes delayed
 * IPv6 transport checksums (CSUM_DELAY_IPV6_DATA) for outbound,
 * locally-originated packets when the interface lacks offload, then
 * calls pf_test6_mbuf().  A verdict other than PF_PASS yields
 * EHOSTUNREACH.
 *
 * NOTE(review): damaged extraction -- lines split and some missing;
 * verify against upstream pf_ioctl.c.
 */
4477 pf_inet6_hook(struct ifnet
*ifp
, struct mbuf
**mp
, int input
,
4478 struct ip_fw_args
*fwa
)
4483 * If the packet is outbound, is originated locally, is flagged for
4484 * delayed UDP/TCP checksum calculation, and is about to be processed
4485 * for an interface that doesn't support the appropriate checksum
4486 * offloading, then calculate the checksum here so that PF can adjust
/* rcvif == NULL marks a locally-originated outbound packet */
4489 if (!input
&& (*mp
)->m_pkthdr
.rcvif
== NULL
) {
4490 static const int mask
= CSUM_DELAY_IPV6_DATA
;
/* checksum work still pending that the NIC cannot do itself */
4491 const int flags
= (*mp
)->m_pkthdr
.csum_flags
&
4492 ~IF_HWASSIST_CSUM_FLAGS(ifp
->if_hwassist
);
4496 * Checksum offload should not have been enabled
4497 * when extension headers exist, thus 0 for optlen.
4499 in6_delayed_cksum(*mp
);
4500 (*mp
)->m_pkthdr
.csum_flags
&= ~mask
;
4504 if (pf_test6_mbuf(input
? PF_IN
: PF_OUT
, ifp
, mp
, NULL
, fwa
) != PF_PASS
) {
4508 error
= EHOSTUNREACH
;
/*
 * Notifies PF that an interface's addresses changed: refreshes the
 * interface's pf kif (ifp->if_pf_kif) via pfi_kifaddr_update() while
 * holding pf_perim_lock (shared) and pf_lock.
 *
 * NOTE(review): damaged extraction -- the return type, braces, and the
 * apparent "kif != NULL" guard are missing; verify upstream.
 */
4518 pf_ifaddr_hook(struct ifnet
*ifp
)
4520 struct pfi_kif
*kif
= ifp
->if_pf_kif
;
4523 lck_rw_lock_shared(pf_perim_lock
);
4524 lck_mtx_lock(pf_lock
);
4526 pfi_kifaddr_update(kif
);
/* release in reverse acquisition order */
4528 lck_mtx_unlock(pf_lock
);
4529 lck_rw_done(pf_perim_lock
);
/*
 * Attach or detach an ifnet to/from PF's interface table, under
 * pf_perim_lock (shared) + pf_lock.  The attach flag selects
 * pfi_attach_ifnet() vs. pfi_detach_ifnet() (the if/else lines are
 * missing from this extraction -- verify upstream).
 */
4535 * Caller acquires dlil lock as writer (exclusive)
4538 pf_ifnet_hook(struct ifnet
*ifp
, int attach
)
4540 lck_rw_lock_shared(pf_perim_lock
);
4541 lck_mtx_lock(pf_lock
);
4543 pfi_attach_ifnet(ifp
);
4545 pfi_detach_ifnet(ifp
);
/* release in reverse acquisition order */
4546 lck_mtx_unlock(pf_lock
);
4547 lck_rw_done(pf_perim_lock
);
/*
 * Registers every already-attached network interface with PF: walks
 * ifindex2ifnet[0..if_index] under ifnet_head_lock_shared() and calls
 * pfi_attach_ifnet() on each entry.  The <= bound is intentional:
 * interface indices start at 1 and if_index is the highest in use.
 *
 * NOTE(review): damaged extraction -- the NULL check on ifp and the
 * unlock appear to be among the missing lines; verify upstream.
 */
4551 pf_attach_hooks(void)
4553 ifnet_head_lock_shared();
4555 * Check against ifnet_addrs[] before proceeding, in case this
4556 * is called very early on, e.g. during dlil_init() before any
4557 * network interface is attached.
4559 if (ifnet_addrs
!= NULL
) {
4562 for (i
= 0; i
<= if_index
; i
++) {
4563 struct ifnet
*ifp
= ifindex2ifnet
[i
];
4565 pfi_attach_ifnet(ifp
);
/*
 * Counterpart of pf_attach_hooks(): walks ifindex2ifnet[0..if_index]
 * under ifnet_head_lock_shared() and detaches from PF every interface
 * that still has a pf kif (ifp->if_pf_kif != NULL).
 *
 * NOTE(review): damaged extraction -- the unlock and closing braces
 * are among the missing lines; verify upstream.
 */
4573 /* currently unused along with pfdetach() */
4575 pf_detach_hooks(void)
4577 ifnet_head_lock_shared();
/* guard against being called before any interface exists */
4578 if (ifnet_addrs
!= NULL
) {
4579 for (i
= 0; i
<= if_index
; i
++) {
4582 struct ifnet
*ifp
= ifindex2ifnet
[i
];
/* only interfaces previously attached to PF have a kif */
4583 if (ifp
!= NULL
&& ifp
->if_pf_kif
!= NULL
) {
4584 pfi_detach_ifnet(ifp
);
4595 * The switch statement below does nothing at runtime, as it serves as a
4596 * compile time check to ensure that all of the socket 'D' ioctls (those
4597 * in the 'D' group going thru soo_ioctl) that are made available by the
4598 networking stack are unique. This works as long as this routine gets
4599 * updated each time a new interface ioctl gets added.
4601 * Any failures at compile time indicates duplicated ioctl values.
4603 static __attribute__((unused
)) void
4604 pfioctl_cassert(void)
4607 * This is equivalent to _CASSERT() and the compiler wouldn't
4608 * generate any instructions, thus for compile time only.
4610 switch ((u_long
)0) {
4613 /* bsd/net/pfvar.h */
4617 case DIOCGETSTARTERS
:
4624 case DIOCSETSTATUSIF
:
4630 case DIOCCHANGERULE
:
4631 case DIOCINSERTRULE
:
4632 case DIOCDELETERULE
:
4633 case DIOCSETTIMEOUT
:
4634 case DIOCGETTIMEOUT
:
4636 case DIOCCLRRULECTRS
:
4639 case DIOCKILLSTATES
:
4645 case DIOCCHANGEALTQ
:
4647 case DIOCBEGINADDRS
:
4651 case DIOCCHANGEADDR
:
4652 case DIOCGETRULESETS
:
4653 case DIOCGETRULESET
:
4654 case DIOCRCLRTABLES
:
4655 case DIOCRADDTABLES
:
4656 case DIOCRDELTABLES
:
4657 case DIOCRGETTABLES
:
4658 case DIOCRGETTSTATS
:
4659 case DIOCRCLRTSTATS
:
4665 case DIOCRGETASTATS
:
4666 case DIOCRCLRASTATS
:
4668 case DIOCRSETTFLAGS
:
4669 case DIOCRINADEFINE
:
4676 case DIOCGETSRCNODES
:
4677 case DIOCCLRSRCNODES
:
4679 case DIOCIGETIFACES
:
4682 case DIOCKILLSRCNODES
: