/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/mcache.h>
#include <sys/queue.h>

#include <mach/vm_param.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#include <libkern/crypto/md5.h>

#include <machine/machine_routines.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>

#if NPFSYNC
#include <net/if_pfsync.h>
#endif /* NPFSYNC */

#if PFLOG
#include <net/if_pflog.h>
#endif /* PFLOG */

#if INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#if PF_ALTQ
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/classq/classq_red.h>
#include <net/classq/classq_rio.h>
#include <net/classq/classq_blue.h>
#include <net/classq/classq_sfb.h>
#endif /* PF_ALTQ */

#include <dev/random/randomdev.h>
static void pfdetach(void);
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
    struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
    struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
    struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
    struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *,
    struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
    struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
    struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
    struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
    struct pfioc_iface_64 *, struct proc *);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
#if PF_ALTQ
static int pf_begin_altq(u_int32_t *);
static int pf_rollback_altq(u_int32_t);
static int pf_commit_altq(u_int32_t);
static int pf_enable_altq(struct pf_altq *);
static int pf_disable_altq(struct pf_altq *);
static void pf_altq_copyin(struct pf_altq *, struct pf_altq *);
static void pf_altq_copyout(struct pf_altq *, struct pf_altq *);
#endif /* PF_ALTQ */
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
    int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
    int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
    struct pf_ruleset *);
static void pf_delete_rule_by_owner(char *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
    int, struct pf_rule **);
#define	PF_CDEV_MAJOR	(-1)

static struct cdevsw pf_cdevsw = {
	/* open */	pfopen,
	/* close */	pfclose,
	/* read */	eno_rdwrt,
	/* write */	eno_rdwrt,
	/* ioctl */	pfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	eno_select,
	/* mmap */	eno_mmap,
	/* strategy */	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};
static void pf_attach_hooks(void);
#if 0
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
#endif

/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;
#if PF_ALTQ
u_int32_t altq_allowed = 0;
#endif /* PF_ALTQ */

u_int32_t pf_hash_seed;
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;
static u_int64_t pffwrules;
static u_int32_t pfdevcnt;

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;
struct pf_rule pf_default_rule;
#if PF_ALTQ
static int pf_altq_running;
#endif /* PF_ALTQ */

#define	TAGID_MAX	50000
#if !PF_ALTQ
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
#else /* PF_ALTQ */
static TAILQ_HEAD(pf_tags, pf_tagname)
    pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
    pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
#endif /* PF_ALTQ */

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t tagname2tag(struct pf_tags *, char *);
static void tag2tagname(struct pf_tags *, u_int16_t, char *);
static void tag_unref(struct pf_tags *, u_int16_t);
static int pf_rtlabel_add(struct pf_addr_wrap *);
static void pf_rtlabel_remove(struct pf_addr_wrap *);
static void pf_rtlabel_copyout(struct pf_addr_wrap *);

#if INET
static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
#endif /* INET */
#if INET6
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
#endif /* INET6 */

#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
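
/*
 * Usage sketch (illustrative): DPFPRINTF gates diagnostics on the debug
 * level stored in pf_status.debug, so the extra parentheses at the call
 * site carry the entire printf argument list, e.g.:
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: ticket %u committed\n", ticket));
 *
 * The message is emitted only when pf_status.debug >= PF_DEBUG_MISC.
 */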
/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
#define	PFIOCX_STRUCT_DECL(s)						\
struct {								\
	union {								\
		struct s##_32	_s##_32;				\
		struct s##_64	_s##_64;				\
	} _u;								\
} *s##_un = NULL

#define	PFIOCX_STRUCT_BEGIN(a, s, _action) {				\
	VERIFY(s##_un == NULL);						\
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);	\
	if (s##_un == NULL) {						\
		_action							\
	} else {							\
		if (p64)						\
			bcopy(a, &s##_un->_u._s##_64,			\
			    sizeof (struct s##_64));			\
		else							\
			bcopy(a, &s##_un->_u._s##_32,			\
			    sizeof (struct s##_32));			\
	}								\
}

#define	PFIOCX_STRUCT_END(s, a) {					\
	VERIFY(s##_un != NULL);						\
	if (p64)							\
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));	\
	else								\
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));	\
	_FREE(s##_un, M_TEMP);						\
	s##_un = NULL;							\
}

#define	PFIOCX_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
#define	PFIOCX_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)
/*
 * Helper macros for regular ioctl structures.
 */
#define	PFIOC_STRUCT_BEGIN(a, v, _action) {				\
	VERIFY((v) == NULL);						\
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);		\
	if ((v) == NULL) {						\
		_action							\
	} else {							\
		bcopy(a, v, sizeof (*(v)));				\
	}								\
}

#define	PFIOC_STRUCT_END(v, a) {					\
	VERIFY((v) != NULL);						\
	bcopy(v, a, sizeof (*(v)));					\
	_FREE(v, M_TEMP);						\
	(v) = NULL;							\
}

#define	PFIOC_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
#define	PFIOC_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)
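
/*
 * Illustrative expansion (not compiled): for a hypothetical command that
 * carries a struct pfioc_foo whose layout differs between 32-bit and
 * 64-bit userland, the PFIOCX_* helpers pair up inside pfioctl() as:
 *
 *	PFIOCX_STRUCT_DECL(pfioc_foo);
 *	PFIOCX_STRUCT_BEGIN(addr, pfioc_foo, error = ENOMEM; break;);
 *	error = pfioctl_ioc_foo(cmd,
 *	    PFIOCX_STRUCT_ADDR32(pfioc_foo),
 *	    PFIOCX_STRUCT_ADDR64(pfioc_foo), p);
 *	PFIOCX_STRUCT_END(pfioc_foo, addr);
 *
 * BEGIN allocates a union of both layouts and copies the user bytes into
 * the variant matching the caller's ABI (p64); END copies the result back
 * and frees the union.  pfioc_foo is a made-up name; see the real uses in
 * the DIOCGETSTARTERS and DIOCRGETTABLES handlers below.
 */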
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;

extern void pfi_kifaddr_update(void *);
/* pf enable ref-counting helper functions */
static u_int64_t generate_token(struct proc *);
static int remove_token(struct pfioc_remove_token *);
static void invalidate_all_tokens(void);
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK|M_ZERO);

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		printf("%s: unable to allocate pf token structure!", __func__);
		return (0);
	}

	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof (new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return (token_value);
}
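
/*
 * Reference-counted enable, sketched from userland's point of view
 * (illustrative only; error handling omitted):
 *
 *	u_int64_t token;
 *	int fd = open("/dev/pf", O_RDWR);
 *	ioctl(fd, DIOCSTARTREF, &token);	// enables pf, takes a ref
 *	...
 *	struct pfioc_remove_token pfrt = { .token_value = token };
 *	ioctl(fd, DIOCSTOPREF, &pfrt);		// drops the ref; pf stops
 *						// once the count hits zero
 *
 * The token returned here is the permuted kernel address of the tracking
 * structure, so it is unique per caller and not trivially guessable.
 */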
static int
remove_token(struct pfioc_remove_token *tok)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		if (tok->token_value == entry->token.token_value) {
			SLIST_REMOVE(&token_list_head, entry,
			    pfioc_kernel_token, next);
			_FREE(entry, M_TEMP);
			nr_tokens--;
			return (0);    /* success */
		}
	}

	printf("pf : remove failure\n");
	return (ESRCH);    /* failure */
}
static void
invalidate_all_tokens(void)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token,
		    next);
		_FREE(entry, M_TEMP);
	}

	nr_tokens = 0;
}
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#if PF_ALTQ
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
#endif /* PF_ALTQ */
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);

	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);
#if PF_ALTQ
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	PE_parse_boot_argn("altq", &altq_allowed, sizeof (altq_allowed));

	_CASSERT(ALTRQ_PURGE == CLASSQRQ_PURGE);
	_CASSERT(ALTRQ_PURGE_SC == CLASSQRQ_PURGE_SC);
	_CASSERT(ALTRQ_EVENT == CLASSQRQ_EVENT);

	_CASSERT(ALTDQ_REMOVE == CLASSQDQ_REMOVE);
	_CASSERT(ALTDQ_POLL == CLASSQDQ_POLL);
#endif /* PF_ALTQ */

	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);
	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
}
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#if PF_ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* PF_ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
#if PF_ALTQ
	pool_destroy(&pf_altq_pl);
#endif /* PF_ALTQ */
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
}
static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX)
		return (ENXIO);

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		if (pfdevcnt != 0) {
			lck_mtx_unlock(pf_lock);
			return (EBUSY);
		}
		pfdevcnt++;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}
static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX)
		return (ENXIO);

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		VERIFY(pfdevcnt > 0);
		pfdevcnt--;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}
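
/*
 * Note on the two device nodes created in pfinit(): /dev/pf (PFDEV_PF)
 * admits any number of concurrent opens, while /dev/pfm (PFDEV_PFM) is
 * single-open -- pfopen() counts it via pfdevcnt and pfclose() drops the
 * count again.  Rules submitted through the pfm node are later tagged
 * PFRULE_PFM in pf_rule_copyin() below, which is how they get tracked by
 * the pffwrules counter.
 */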
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule;
	int rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#if PF_ALTQ
	if (altq_allowed) {
		if (rule->pqid != rule->qid)
			pf_qid_unref(rule->pqid);
		pf_qid_unref(rule->qid);
	}
#endif /* PF_ALTQ */
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (new_tagid);
}
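
/*
 * Example of the first-free-slot policy (illustrative): with tags
 * {1, 2, 4} on the list, a new name scans 1 and 2, stops at the hole and
 * allocates 3, inserted in order before 4; with {1, 2, 3} it appends 4.
 * Re-registering an existing name just bumps that entry's refcount, so a
 * tag value stays stable for the lifetime of all of its references.
 */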
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname *tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname *p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				_FREE(p, M_TEMP);
			}
			break;
		}
	}
}
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
#if PF_ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag_unref(&pf_qids, (u_int16_t)qid);
}
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}
)
1010 struct pf_altqqueue
*old_altqs
;
1011 struct pf_altq
*altq
;
1014 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1016 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
1019 /* swap altqs, keep the old. */
1020 old_altqs
= pf_altqs_active
;
1021 pf_altqs_active
= pf_altqs_inactive
;
1022 pf_altqs_inactive
= old_altqs
;
1023 ticket_altqs_active
= ticket_altqs_inactive
;
1025 /* Attach new disciplines */
1026 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1027 if (altq
->qname
[0] == '\0') {
1028 /* attach the discipline */
1029 error
= altq_pfattach(altq
);
1030 if (error
== 0 && pf_altq_running
)
1031 error
= pf_enable_altq(altq
);
1038 /* Purge the old altq list */
1039 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
1040 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
1041 if (altq
->qname
[0] == '\0') {
1042 /* detach and destroy the discipline */
1043 if (pf_altq_running
)
1044 error
= pf_disable_altq(altq
);
1045 err
= altq_pfdetach(altq
);
1046 if (err
!= 0 && error
== 0)
1048 err
= altq_remove(altq
);
1049 if (err
!= 0 && error
== 0)
1052 pf_qid_unref(altq
->qid
);
1053 pool_put(&pf_altq_pl
, altq
);
1056 altqs_inactive_open
= 0;
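
/*
 * pf_begin_altq()/pf_rollback_altq()/pf_commit_altq() implement a
 * two-phase update of the altq list: a caller first obtains a ticket for
 * the inactive list, stages queues there, and then either commits (active
 * and inactive lists are swapped under pf_lock and the superseded
 * disciplines are torn down) or rolls back (the staged list is purged).
 * A stale ticket fails the commit, so concurrent editors cannot clobber
 * each other's transactions.  The rule path further below follows the
 * same ticketed pattern.
 */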
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	ifq = &ifp->if_snd;
	IFCQ_LOCK(ifq);
	if (IFCQ_ALTQ(ifq)->altq_type != ALTQT_NONE)
		error = altq_enable(IFCQ_ALTQ(ifq));

	/* set or clear tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
		struct tb_profile tb = { 0, 0, 0 };

		if (altq->aflags & PF_ALTQF_TBR) {
			if (altq->bwtype != PF_ALTQ_BW_ABSOLUTE &&
			    altq->bwtype != PF_ALTQ_BW_PERCENT) {
				error = EINVAL;
			} else {
				if (altq->bwtype == PF_ALTQ_BW_ABSOLUTE)
					tb.rate = altq->ifbandwidth;
				else
					tb.percent = altq->ifbandwidth;
				tb.depth = altq->tbrsize;
				error = ifclassq_tbr_set(ifq, &tb, TRUE);
			}
		} else if (IFCQ_TBR_IS_ENABLED(ifq)) {
			error = ifclassq_tbr_set(ifq, &tb, TRUE);
		}
	}
	IFCQ_UNLOCK(ifq);

	return (error);
}
*altq
)
1106 struct ifclassq
*ifq
;
1109 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1111 if ((ifp
= ifunit(altq
->ifname
)) == NULL
)
1115 * when the discipline is no longer referenced, it was overridden
1116 * by a new one. if so, just return.
1120 if (altq
->altq_disc
!= IFCQ_ALTQ(ifq
)->altq_disc
) {
1125 error
= altq_disable(IFCQ_ALTQ(ifq
));
1127 if (error
== 0 && IFCQ_TBR_IS_ENABLED(ifq
)) {
1128 /* clear tokenbucket regulator */
1129 struct tb_profile tb
= { 0, 0, 0 };
1130 error
= ifclassq_tbr_set(ifq
, &tb
, TRUE
);
static void
pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
{
	bcopy(src, dst, sizeof (struct pf_altq));

	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->parent[sizeof (dst->parent) - 1] = '\0';
	dst->altq_disc = NULL;
	dst->entries.tqe_next = NULL;
	dst->entries.tqe_prev = NULL;
}

static void
pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
{
	struct pf_altq pa;

	bcopy(src, &pa, sizeof (struct pf_altq));
	pa.altq_disc = NULL;
	pa.entries.tqe_next = NULL;
	pa.entries.tqe_prev = NULL;
	bcopy(&pa, dst, sizeof (struct pf_altq));
}
#endif /* PF_ALTQ */
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
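
/*
 * Expansion sketch (illustrative): PF_MD5_UPD(rule, af) becomes
 *
 *	MD5Update(ctx, (u_int8_t *)&rule->af, sizeof (rule->af));
 *
 * while the HTONL/HTONS variants first byte-swap the value through the
 * caller-supplied scratch variable, so the resulting digest is
 * independent of host endianness.
 */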
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;
	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
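
/*
 * Like the altq path, rule updates are transactional: pf_begin_rules()
 * hands out a ticket and opens the inactive ruleset, new rules are loaded
 * there, and pf_commit_rules() swaps the active/inactive pointers in O(1)
 * before purging the superseded list.  Caller-side sequence (illustrative,
 * error handling omitted):
 *
 *	u_int32_t t;
 *	(void) pf_begin_rules(&t, PF_RULESET_FILTER, "");
 *	// ... insert rules into the inactive queue under ticket t ...
 *	(void) pf_commit_rules(t, PF_RULESET_FILTER, "");
 *
 * Anything still holding a stale ticket is rejected with EBUSY, which is
 * what makes the DIOCXBEGIN/DIOCXCOMMIT handlers safe to interleave.
 */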
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->label[sizeof (dst->label) - 1] = '\0';
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';

	dst->cuid = kauth_cred_getuid(p->p_ucred);
	dst->cpid = p->p_pid;

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM)
		dst->rule_flag |= PFRULE_PFM;
}
static void
pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
}
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);

	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}
*sp
, struct pf_state_key
*sk
,
1475 /* copy to state key */
1476 sk
->lan
.addr
= sp
->lan
.addr
;
1477 sk
->lan
.xport
= sp
->lan
.xport
;
1478 sk
->gwy
.addr
= sp
->gwy
.addr
;
1479 sk
->gwy
.xport
= sp
->gwy
.xport
;
1480 sk
->ext_lan
.addr
= sp
->ext_lan
.addr
;
1481 sk
->ext_lan
.xport
= sp
->ext_lan
.xport
;
1482 sk
->ext_gwy
.addr
= sp
->ext_gwy
.addr
;
1483 sk
->ext_gwy
.xport
= sp
->ext_gwy
.xport
;
1484 sk
->proto_variant
= sp
->proto_variant
;
1486 sk
->proto
= sp
->proto
;
1487 sk
->af_lan
= sp
->af_lan
;
1488 sk
->af_gwy
= sp
->af_gwy
;
1489 sk
->direction
= sp
->direction
;
1490 sk
->flowhash
= pf_calc_state_key_flowhash(sk
);
1493 memcpy(&s
->id
, &sp
->id
, sizeof (sp
->id
));
1494 s
->creatorid
= sp
->creatorid
;
1495 pf_state_peer_from_pfsync(&sp
->src
, &s
->src
);
1496 pf_state_peer_from_pfsync(&sp
->dst
, &s
->dst
);
1498 s
->rule
.ptr
= &pf_default_rule
;
1499 s
->nat_rule
.ptr
= NULL
;
1500 s
->anchor
.ptr
= NULL
;
1502 s
->creation
= pf_time_second();
1503 s
->expire
= pf_time_second();
1505 s
->expire
-= pf_default_rule
.timeout
[sp
->timeout
] - sp
->expire
;
1507 s
->packets
[0] = s
->packets
[1] = 0;
1508 s
->bytes
[0] = s
->bytes
[1] = 0;
static void
pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->kif = NULL;
}

static void
pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->kif = NULL;
}
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
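
/*
 * The MD5 digest computed here is stored in pf_status.pf_chksum and acts
 * as a ruleset fingerprint: two hosts agreeing on the checksum are
 * running the same main ruleset, which is what pfsync peers compare
 * before trusting each other's state updates.  Only fields that affect
 * matching are hashed (see pf_hash_rule() above), so counters and other
 * mutable per-rule state do not perturb the digest.
 */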
static void
pf_start(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1605 pfioctl(dev_t dev
, u_long cmd
, caddr_t addr
, int flags
, struct proc
*p
)
1608 int p64
= proc_is64bit(p
);
1610 int minordev
= minor(dev
);
1612 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1615 /* XXX keep in sync with switch() below */
1616 if (securelevel
> 1)
1623 case DIOCSETSTATUSIF
:
1629 case DIOCINSERTRULE
:
1630 case DIOCDELETERULE
:
1631 case DIOCGETTIMEOUT
:
1632 case DIOCCLRRULECTRS
:
1637 case DIOCGETRULESETS
:
1638 case DIOCGETRULESET
:
1639 case DIOCRGETTABLES
:
1640 case DIOCRGETTSTATS
:
1641 case DIOCRCLRTSTATS
:
1647 case DIOCRGETASTATS
:
1648 case DIOCRCLRASTATS
:
1651 case DIOCGETSRCNODES
:
1652 case DIOCCLRSRCNODES
:
1653 case DIOCIGETIFACES
:
1658 case DIOCRCLRTABLES
:
1659 case DIOCRADDTABLES
:
1660 case DIOCRDELTABLES
:
1661 case DIOCRSETTFLAGS
: {
1664 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1665 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1667 if (pfrio_flags
& PFR_FLAG_DUMMY
)
1668 break; /* dummy operation ok */
1675 if (!(flags
& FWRITE
))
1681 case DIOCGETSTARTERS
:
1688 case DIOCINSERTRULE
:
1689 case DIOCDELETERULE
:
1690 case DIOCGETTIMEOUT
:
1695 case DIOCGETRULESETS
:
1696 case DIOCGETRULESET
:
1698 case DIOCRGETTABLES
:
1699 case DIOCRGETTSTATS
:
1701 case DIOCRGETASTATS
:
1704 case DIOCGETSRCNODES
:
1705 case DIOCIGETIFACES
:
1708 case DIOCRCLRTABLES
:
1709 case DIOCRADDTABLES
:
1710 case DIOCRDELTABLES
:
1711 case DIOCRCLRTSTATS
:
1716 case DIOCRSETTFLAGS
: {
1719 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1720 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1722 if (pfrio_flags
& PFR_FLAG_DUMMY
) {
1723 flags
|= FWRITE
; /* need write lock for dummy */
1724 break; /* dummy operation ok */
1731 bcopy(&((struct pfioc_rule
*)(void *)addr
)->action
,
1732 &action
, sizeof (action
));
1734 if (action
== PF_GET_CLR_CNTR
)
1749 case DIOCCHANGEALTQ
:
1751 /* fail if ALTQ is disabled */
1756 #endif /* PF_ALTQ */
1759 lck_rw_lock_exclusive(pf_perim_lock
);
1761 lck_rw_lock_shared(pf_perim_lock
);
1763 lck_mtx_lock(pf_lock
);
1768 if (pf_status
.running
) {
1770 * Increment the reference for a simple -e enable, so
1771 * that even if other processes drop their references,
1772 * pf will still be available to processes that turned
1773 * it on without taking a reference
1775 if (nr_tokens
== pf_enabled_ref_count
) {
1776 pf_enabled_ref_count
++;
1777 VERIFY(pf_enabled_ref_count
!= 0);
1780 } else if (pf_purge_thread
== NULL
) {
1784 pf_enabled_ref_count
++;
1785 VERIFY(pf_enabled_ref_count
!= 0);
1789 case DIOCSTARTREF
: /* u_int64_t */
1790 if (pf_purge_thread
== NULL
) {
1795 /* small enough to be on stack */
1796 if ((token
= generate_token(p
)) != 0) {
1797 if (pf_is_enabled
== 0) {
1800 pf_enabled_ref_count
++;
1801 VERIFY(pf_enabled_ref_count
!= 0);
1804 DPFPRINTF(PF_DEBUG_URGENT
,
1805 ("pf: unable to generate token\n"));
1807 bcopy(&token
, addr
, sizeof (token
));
1812 if (!pf_status
.running
) {
1816 pf_enabled_ref_count
= 0;
1817 invalidate_all_tokens();
1821 case DIOCSTOPREF
: /* struct pfioc_remove_token */
1822 if (!pf_status
.running
) {
1825 struct pfioc_remove_token pfrt
;
1827 /* small enough to be on stack */
1828 bcopy(addr
, &pfrt
, sizeof (pfrt
));
1829 if ((error
= remove_token(&pfrt
)) == 0) {
1830 VERIFY(pf_enabled_ref_count
!= 0);
1831 pf_enabled_ref_count
--;
1832 /* return currently held references */
1833 pfrt
.refcount
= pf_enabled_ref_count
;
1834 DPFPRINTF(PF_DEBUG_MISC
,
1835 ("pf: enabled refcount decremented\n"));
1838 DPFPRINTF(PF_DEBUG_URGENT
,
1839 ("pf: token mismatch\n"));
1841 bcopy(&pfrt
, addr
, sizeof (pfrt
));
1843 if (error
== 0 && pf_enabled_ref_count
== 0)
1848 case DIOCGETSTARTERS
: { /* struct pfioc_tokens */
1849 PFIOCX_STRUCT_DECL(pfioc_tokens
);
1851 PFIOCX_STRUCT_BEGIN(addr
, pfioc_tokens
, error
= ENOMEM
; break;);
1852 error
= pfioctl_ioc_tokens(cmd
,
1853 PFIOCX_STRUCT_ADDR32(pfioc_tokens
),
1854 PFIOCX_STRUCT_ADDR64(pfioc_tokens
), p
);
1855 PFIOCX_STRUCT_END(pfioc_tokens
, addr
);
1859 case DIOCADDRULE
: /* struct pfioc_rule */
1860 case DIOCGETRULES
: /* struct pfioc_rule */
1861 case DIOCGETRULE
: /* struct pfioc_rule */
1862 case DIOCCHANGERULE
: /* struct pfioc_rule */
1863 case DIOCINSERTRULE
: /* struct pfioc_rule */
1864 case DIOCDELETERULE
: { /* struct pfioc_rule */
1865 struct pfioc_rule
*pr
= NULL
;
1867 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
1868 error
= pfioctl_ioc_rule(cmd
, minordev
, pr
, p
);
1869 PFIOC_STRUCT_END(pr
, addr
);
1873 case DIOCCLRSTATES
: /* struct pfioc_state_kill */
1874 case DIOCKILLSTATES
: { /* struct pfioc_state_kill */
1875 struct pfioc_state_kill
*psk
= NULL
;
1877 PFIOC_STRUCT_BEGIN(addr
, psk
, error
= ENOMEM
; break;);
1878 error
= pfioctl_ioc_state_kill(cmd
, psk
, p
);
1879 PFIOC_STRUCT_END(psk
, addr
);
1883 case DIOCADDSTATE
: /* struct pfioc_state */
1884 case DIOCGETSTATE
: { /* struct pfioc_state */
1885 struct pfioc_state
*ps
= NULL
;
1887 PFIOC_STRUCT_BEGIN(addr
, ps
, error
= ENOMEM
; break;);
1888 error
= pfioctl_ioc_state(cmd
, ps
, p
);
1889 PFIOC_STRUCT_END(ps
, addr
);
1893 case DIOCGETSTATES
: { /* struct pfioc_states */
1894 PFIOCX_STRUCT_DECL(pfioc_states
);
1896 PFIOCX_STRUCT_BEGIN(addr
, pfioc_states
, error
= ENOMEM
; break;);
1897 error
= pfioctl_ioc_states(cmd
,
1898 PFIOCX_STRUCT_ADDR32(pfioc_states
),
1899 PFIOCX_STRUCT_ADDR64(pfioc_states
), p
);
1900 PFIOCX_STRUCT_END(pfioc_states
, addr
);
1904 case DIOCGETSTATUS
: { /* struct pf_status */
1905 struct pf_status
*s
= NULL
;
1907 PFIOC_STRUCT_BEGIN(&pf_status
, s
, error
= ENOMEM
; break;);
1908 pfi_update_status(s
->ifname
, s
);
1909 PFIOC_STRUCT_END(s
, addr
);
1913 case DIOCSETSTATUSIF
: { /* struct pfioc_if */
1914 struct pfioc_if
*pi
= (struct pfioc_if
*)(void *)addr
;
1916 /* OK for unaligned accesses */
1917 if (pi
->ifname
[0] == 0) {
1918 bzero(pf_status
.ifname
, IFNAMSIZ
);
1921 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
1925 case DIOCCLRSTATUS
: {
1926 bzero(pf_status
.counters
, sizeof (pf_status
.counters
));
1927 bzero(pf_status
.fcounters
, sizeof (pf_status
.fcounters
));
1928 bzero(pf_status
.scounters
, sizeof (pf_status
.scounters
));
1929 pf_status
.since
= pf_calendar_time_second();
1930 if (*pf_status
.ifname
)
1931 pfi_update_status(pf_status
.ifname
, NULL
);
1935 case DIOCNATLOOK
: { /* struct pfioc_natlook */
1936 struct pfioc_natlook
*pnl
= NULL
;
1938 PFIOC_STRUCT_BEGIN(addr
, pnl
, error
= ENOMEM
; break;);
1939 error
= pfioctl_ioc_natlook(cmd
, pnl
, p
);
1940 PFIOC_STRUCT_END(pnl
, addr
);
1944 case DIOCSETTIMEOUT
: /* struct pfioc_tm */
1945 case DIOCGETTIMEOUT
: { /* struct pfioc_tm */
1948 /* small enough to be on stack */
1949 bcopy(addr
, &pt
, sizeof (pt
));
1950 error
= pfioctl_ioc_tm(cmd
, &pt
, p
);
1951 bcopy(&pt
, addr
, sizeof (pt
));
1955 case DIOCGETLIMIT
: /* struct pfioc_limit */
1956 case DIOCSETLIMIT
: { /* struct pfioc_limit */
1957 struct pfioc_limit pl
;
1959 /* small enough to be on stack */
1960 bcopy(addr
, &pl
, sizeof (pl
));
1961 error
= pfioctl_ioc_limit(cmd
, &pl
, p
);
1962 bcopy(&pl
, addr
, sizeof (pl
));
1966 case DIOCSETDEBUG
: { /* u_int32_t */
1967 bcopy(addr
, &pf_status
.debug
, sizeof (u_int32_t
));
1971 case DIOCCLRRULECTRS
: {
1972 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1973 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
1974 struct pf_rule
*rule
;
1977 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
) {
1978 rule
->evaluations
= 0;
1979 rule
->packets
[0] = rule
->packets
[1] = 0;
1980 rule
->bytes
[0] = rule
->bytes
[1] = 0;
1985 case DIOCGIFSPEED
: {
1986 struct pf_ifspeed
*psp
= (struct pf_ifspeed
*)(void *)addr
;
1987 struct pf_ifspeed ps
;
1991 if (psp
->ifname
[0] != '\0') {
1992 /* Can we completely trust user-land? */
1993 strlcpy(ps
.ifname
, psp
->ifname
, IFNAMSIZ
);
1994 ps
.ifname
[IFNAMSIZ
- 1] = '\0';
1995 ifp
= ifunit(ps
.ifname
);
1997 baudrate
= ifp
->if_output_bw
.max_bw
;
1998 bcopy(&baudrate
, &psp
->baudrate
,
2010 case DIOCSTARTALTQ
: {
2011 struct pf_altq
*altq
;
2013 VERIFY(altq_allowed
);
2014 /* enable all altq interfaces on active list */
2015 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
2016 if (altq
->qname
[0] == '\0') {
2017 error
= pf_enable_altq(altq
);
2023 pf_altq_running
= 1;
2024 DPFPRINTF(PF_DEBUG_MISC
, ("altq: started\n"));
2028 case DIOCSTOPALTQ
: {
2029 struct pf_altq
*altq
;
2031 VERIFY(altq_allowed
);
2032 /* disable all altq interfaces on active list */
2033 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
2034 if (altq
->qname
[0] == '\0') {
2035 error
= pf_disable_altq(altq
);
2041 pf_altq_running
= 0;
2042 DPFPRINTF(PF_DEBUG_MISC
, ("altq: stopped\n"));
2046 case DIOCADDALTQ
: { /* struct pfioc_altq */
2047 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2048 struct pf_altq
*altq
, *a
;
2051 VERIFY(altq_allowed
);
2052 bcopy(&pa
->ticket
, &ticket
, sizeof (ticket
));
2053 if (ticket
!= ticket_altqs_inactive
) {
2057 altq
= pool_get(&pf_altq_pl
, PR_WAITOK
);
2062 pf_altq_copyin(&pa
->altq
, altq
);
2065 * if this is for a queue, find the discipline and
2066 * copy the necessary fields
2068 if (altq
->qname
[0] != '\0') {
2069 if ((altq
->qid
= pf_qname2qid(altq
->qname
)) == 0) {
2071 pool_put(&pf_altq_pl
, altq
);
2074 altq
->altq_disc
= NULL
;
2075 TAILQ_FOREACH(a
, pf_altqs_inactive
, entries
) {
2076 if (strncmp(a
->ifname
, altq
->ifname
,
2077 IFNAMSIZ
) == 0 && a
->qname
[0] == '\0') {
2078 altq
->altq_disc
= a
->altq_disc
;
2084 error
= altq_add(altq
);
2086 pool_put(&pf_altq_pl
, altq
);
2090 TAILQ_INSERT_TAIL(pf_altqs_inactive
, altq
, entries
);
2091 pf_altq_copyout(altq
, &pa
->altq
);
2095 case DIOCGETALTQS
: {
2096 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2097 struct pf_altq
*altq
;
2100 VERIFY(altq_allowed
);
2102 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
)
2104 bcopy(&nr
, &pa
->nr
, sizeof (nr
));
2105 bcopy(&ticket_altqs_active
, &pa
->ticket
, sizeof (pa
->ticket
));
2110 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2111 struct pf_altq
*altq
;
2112 u_int32_t nr
, pa_nr
, ticket
;
2114 VERIFY(altq_allowed
);
2115 bcopy(&pa
->ticket
, &ticket
, sizeof (ticket
));
2116 if (ticket
!= ticket_altqs_active
) {
2120 bcopy(&pa
->nr
, &pa_nr
, sizeof (pa_nr
));
2122 altq
= TAILQ_FIRST(pf_altqs_active
);
2123 while ((altq
!= NULL
) && (nr
< pa_nr
)) {
2124 altq
= TAILQ_NEXT(altq
, entries
);
2131 pf_altq_copyout(altq
, &pa
->altq
);
2135 case DIOCCHANGEALTQ
:
2136 VERIFY(altq_allowed
);
2137 /* CHANGEALTQ not supported yet! */
2141 case DIOCGETQSTATS
: {
2142 struct pfioc_qstats
*pq
= (struct pfioc_qstats
*)(void *)addr
;
2143 struct pf_altq
*altq
;
2144 u_int32_t nr
, pq_nr
, ticket
;
2147 VERIFY(altq_allowed
);
2148 bcopy(&pq
->ticket
, &ticket
, sizeof (ticket
));
2149 if (ticket
!= ticket_altqs_active
) {
2153 bcopy(&pq
->nr
, &pq_nr
, sizeof (pq_nr
));
2155 altq
= TAILQ_FIRST(pf_altqs_active
);
2156 while ((altq
!= NULL
) && (nr
< pq_nr
)) {
2157 altq
= TAILQ_NEXT(altq
, entries
);
2164 bcopy(&pq
->nbytes
, &nbytes
, sizeof (nbytes
));
2165 error
= altq_getqstats(altq
, pq
->buf
, &nbytes
);
2167 pq
->scheduler
= altq
->scheduler
;
2168 bcopy(&nbytes
, &pq
->nbytes
, sizeof (nbytes
));
2172 #endif /* PF_ALTQ */
2174 case DIOCBEGINADDRS
: /* struct pfioc_pooladdr */
2175 case DIOCADDADDR
: /* struct pfioc_pooladdr */
2176 case DIOCGETADDRS
: /* struct pfioc_pooladdr */
2177 case DIOCGETADDR
: /* struct pfioc_pooladdr */
2178 case DIOCCHANGEADDR
: { /* struct pfioc_pooladdr */
2179 struct pfioc_pooladdr
*pp
= NULL
;
2181 PFIOC_STRUCT_BEGIN(addr
, pp
, error
= ENOMEM
; break;)
2182 error
= pfioctl_ioc_pooladdr(cmd
, pp
, p
);
2183 PFIOC_STRUCT_END(pp
, addr
);
2187 case DIOCGETRULESETS
: /* struct pfioc_ruleset */
2188 case DIOCGETRULESET
: { /* struct pfioc_ruleset */
2189 struct pfioc_ruleset
*pr
= NULL
;
2191 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
2192 error
= pfioctl_ioc_ruleset(cmd
, pr
, p
);
2193 PFIOC_STRUCT_END(pr
, addr
);
2197 case DIOCRCLRTABLES
: /* struct pfioc_table */
2198 case DIOCRADDTABLES
: /* struct pfioc_table */
2199 case DIOCRDELTABLES
: /* struct pfioc_table */
2200 case DIOCRGETTABLES
: /* struct pfioc_table */
2201 case DIOCRGETTSTATS
: /* struct pfioc_table */
2202 case DIOCRCLRTSTATS
: /* struct pfioc_table */
2203 case DIOCRSETTFLAGS
: /* struct pfioc_table */
2204 case DIOCRCLRADDRS
: /* struct pfioc_table */
2205 case DIOCRADDADDRS
: /* struct pfioc_table */
2206 case DIOCRDELADDRS
: /* struct pfioc_table */
2207 case DIOCRSETADDRS
: /* struct pfioc_table */
2208 case DIOCRGETADDRS
: /* struct pfioc_table */
2209 case DIOCRGETASTATS
: /* struct pfioc_table */
2210 case DIOCRCLRASTATS
: /* struct pfioc_table */
2211 case DIOCRTSTADDRS
: /* struct pfioc_table */
2212 case DIOCRINADEFINE
: { /* struct pfioc_table */
2213 PFIOCX_STRUCT_DECL(pfioc_table
);
2215 PFIOCX_STRUCT_BEGIN(addr
, pfioc_table
, error
= ENOMEM
; break;);
2216 error
= pfioctl_ioc_table(cmd
,
2217 PFIOCX_STRUCT_ADDR32(pfioc_table
),
2218 PFIOCX_STRUCT_ADDR64(pfioc_table
), p
);
2219 PFIOCX_STRUCT_END(pfioc_table
, addr
);
2223 case DIOCOSFPADD
: /* struct pf_osfp_ioctl */
2224 case DIOCOSFPGET
: { /* struct pf_osfp_ioctl */
2225 struct pf_osfp_ioctl
*io
= NULL
;
2227 PFIOC_STRUCT_BEGIN(addr
, io
, error
= ENOMEM
; break;);
2228 if (cmd
== DIOCOSFPADD
) {
2229 error
= pf_osfp_add(io
);
2231 VERIFY(cmd
== DIOCOSFPGET
);
2232 error
= pf_osfp_get(io
);
2234 PFIOC_STRUCT_END(io
, addr
);
2238 case DIOCXBEGIN
: /* struct pfioc_trans */
2239 case DIOCXROLLBACK
: /* struct pfioc_trans */
2240 case DIOCXCOMMIT
: { /* struct pfioc_trans */
2241 PFIOCX_STRUCT_DECL(pfioc_trans
);
2243 PFIOCX_STRUCT_BEGIN(addr
, pfioc_trans
, error
= ENOMEM
; break;);
2244 error
= pfioctl_ioc_trans(cmd
,
2245 PFIOCX_STRUCT_ADDR32(pfioc_trans
),
2246 PFIOCX_STRUCT_ADDR64(pfioc_trans
), p
);
2247 PFIOCX_STRUCT_END(pfioc_trans
, addr
);
2251 case DIOCGETSRCNODES
: { /* struct pfioc_src_nodes */
2252 PFIOCX_STRUCT_DECL(pfioc_src_nodes
);
2254 PFIOCX_STRUCT_BEGIN(addr
, pfioc_src_nodes
,
2255 error
= ENOMEM
; break;);
2256 error
= pfioctl_ioc_src_nodes(cmd
,
2257 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes
),
2258 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes
), p
);
2259 PFIOCX_STRUCT_END(pfioc_src_nodes
, addr
);
2263 case DIOCCLRSRCNODES
: {
2264 struct pf_src_node
*n
;
2265 struct pf_state
*state
;
2267 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2268 state
->src_node
= NULL
;
2269 state
->nat_src_node
= NULL
;
2271 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2275 pf_purge_expired_src_nodes();
2276 pf_status
.src_nodes
= 0;
2280 case DIOCKILLSRCNODES
: { /* struct pfioc_src_node_kill */
2281 struct pfioc_src_node_kill
*psnk
= NULL
;
2283 PFIOC_STRUCT_BEGIN(addr
, psnk
, error
= ENOMEM
; break;);
2284 error
= pfioctl_ioc_src_node_kill(cmd
, psnk
, p
);
2285 PFIOC_STRUCT_END(psnk
, addr
);
    case DIOCSETHOSTID: {       /* u_int32_t */
        u_int32_t hid;

        /* small enough to be on stack */
        bcopy(addr, &hid, sizeof (hid));
        if (hid == 0)
            pf_status.hostid = random();
        else
            pf_status.hostid = hid;
        break;
    }
    case DIOCIGETIFACES:        /* struct pfioc_iface */
    case DIOCSETIFFLAG:         /* struct pfioc_iface */
    case DIOCCLRIFFLAG: {       /* struct pfioc_iface */
        PFIOCX_STRUCT_DECL(pfioc_iface);

        PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
        error = pfioctl_ioc_iface(cmd,
            PFIOCX_STRUCT_ADDR32(pfioc_iface),
            PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
        PFIOCX_STRUCT_END(pfioc_iface, addr);
        break;
    }
    default:
        error = ENODEV;
        break;
    }

    lck_mtx_unlock(pf_lock);
    lck_rw_done(pf_perim_lock);

    return (error);
}
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
    int p64 = proc_is64bit(p);
    int error = 0;

    if (!p64)
        goto struct32;

    /*
     * 64-bit structure processing
     */
    switch (cmd) {
    case DIOCRCLRTABLES:
        if (io64->pfrio_esize != 0) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
            io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRADDTABLES:
        if (io64->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
            &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRDELTABLES:
        if (io64->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
            &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETTABLES:
        if (io64->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
            &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETTSTATS:
        if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
            &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRTSTATS:
        if (io64->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
            &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRSETTFLAGS:
        if (io64->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
            io64->pfrio_setflag, io64->pfrio_clrflag,
            &io64->pfrio_nchange, &io64->pfrio_ndel,
            io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRADDRS:
        if (io64->pfrio_esize != 0) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
            io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRADDADDRS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRDELADDRS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRSETADDRS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
            &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
            PFR_FLAG_USERIOCTL, 0);
        break;

    case DIOCRGETADDRS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
            &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETASTATS:
        if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
            &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRASTATS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRTSTADDRS:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRINADEFINE:
        if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io64->pfrio_table);
        error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
            io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
            io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    default:
        VERIFY(0);
        /* NOTREACHED */
    }
    goto done;
struct32:
    /*
     * 32-bit structure processing
     */
    switch (cmd) {
    case DIOCRCLRTABLES:
        if (io32->pfrio_esize != 0) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
            io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRADDTABLES:
        if (io32->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
            &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRDELTABLES:
        if (io32->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
            &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETTABLES:
        if (io32->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
            &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETTSTATS:
        if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
            &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRTSTATS:
        if (io32->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
            &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRSETTFLAGS:
        if (io32->pfrio_esize != sizeof (struct pfr_table)) {
            error = ENODEV;
            break;
        }
        error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
            io32->pfrio_setflag, io32->pfrio_clrflag,
            &io32->pfrio_nchange, &io32->pfrio_ndel,
            io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRADDRS:
        if (io32->pfrio_esize != 0) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
            io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRADDADDRS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRDELADDRS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRSETADDRS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
            &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
            PFR_FLAG_USERIOCTL, 0);
        break;

    case DIOCRGETADDRS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
            &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRGETASTATS:
        if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
            &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    case DIOCRCLRASTATS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRTSTADDRS:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
            PFR_FLAG_USERIOCTL);
        break;

    case DIOCRINADEFINE:
        if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
            error = ENODEV;
            break;
        }
        pfr_table_copyin_cleanup(&io32->pfrio_table);
        error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
            io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
            io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
        break;

    default:
        VERIFY(0);
        /* NOTREACHED */
    }

done:
    return (error);
}
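
/*
 * DIOCGETSTARTERS returns one pfioc_token per process currently holding
 * a PF start reference, trimmed to whatever fits in the caller's buffer;
 * the size field is rewritten to the number of bytes actually copied out.
 */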
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
    struct pfioc_token *tokens;
    struct pfioc_kernel_token *entry, *tmp;
    user_addr_t token_buf;
    int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
    char *ptr;

    switch (cmd) {
    case DIOCGETSTARTERS: {
        int size;

        if (nr_tokens == 0) {
            error = ENOENT;
            break;
        }

        size = sizeof (struct pfioc_token) * nr_tokens;
        ocnt = cnt = (p64 ? tok64->size : tok32->size);
        if (cnt == 0) {
            if (p64)
                tok64->size = size;
            else
                tok32->size = size;
            break;
        }

        token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
        tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
        if (tokens == NULL) {
            error = ENOMEM;
            break;
        }

        ptr = (void *)tokens;
        SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
            struct pfioc_token *t;

            if ((unsigned)cnt < sizeof (*tokens))
                break;    /* no more buffer space left */

            t = (struct pfioc_token *)(void *)ptr;
            t->token_value = entry->token.token_value;
            t->timestamp = entry->token.timestamp;
            t->pid = entry->token.pid;
            bcopy(entry->token.proc_name, t->proc_name,
                PFTOK_PROCNAME_LEN);
            ptr += sizeof (struct pfioc_token);

            cnt -= sizeof (struct pfioc_token);
        }

        error = copyout(tokens, token_buf, ocnt - cnt);
        if (p64)
            tok64->size = ocnt - cnt;
        else
            tok32->size = ocnt - cnt;

        _FREE(tokens, M_TEMP);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
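
/*
 * Expire every state and source node hanging off the given rule, so the
 * rule can be torn down without leaving dangling rule pointers behind.
 */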
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
    struct pf_state *state;
    struct pf_src_node *sn;
    int killed = 0;

    /* expire the states */
    state = TAILQ_FIRST(&state_list);
    while (state) {
        if (state->rule.ptr == rule)
            state->timeout = PFTM_PURGE;
        state = TAILQ_NEXT(state, entry_list);
    }
    pf_purge_expired_states(pf_status.states);

    /* expire the src_nodes */
    RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
        if (sn->rule.ptr != rule)
            continue;
        if (sn->states != 0) {
            RB_FOREACH(state, pf_state_tree_id,
                &tree_id) {
                if (state->src_node == sn)
                    state->src_node = NULL;
                if (state->nat_src_node == sn)
                    state->nat_src_node = NULL;
            }
            sn->states = 0;
        }
        sn->expire = 1;
        killed++;
    }
    if (killed)
        pf_purge_expired_src_nodes();
}
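
/*
 * Unlink a rule from the active list of the given ruleset, then renumber
 * the remaining rules so that rule->nr stays dense.
 */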
static void
pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
    struct pf_rule *rule)
{
    struct pf_rule *r;
    int nr = 0;

    pf_expire_states_and_src_nodes(rule);

    pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
    if (ruleset->rules[rs_num].active.rcount-- == 0)
        panic("%s: rcount value broken!", __func__);
    r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);

    while (r) {
        r->nr = nr++;
        r = TAILQ_NEXT(r, entries);
    }
}
static void
pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
{
    pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
    ruleset->rules[rs].active.ticket =
        ++ruleset->rules[rs].inactive.ticket;
}
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule = NULL;
    int is_anchor;
    int error;
    int i;

    is_anchor = (pr->anchor_call[0] != '\0');
    if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
        pr->rule.owner, is_anchor, &error)) == NULL)
        return (error);

    for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
        rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
        while (rule && (rule->ticket != pr->rule.ticket))
            rule = TAILQ_NEXT(rule, entries);
    }
    if (rule == NULL)
        return (ENOENT);
    else
        i--;

    if (strcmp(rule->owner, pr->rule.owner))
        return (EACCES);

delete_rule:
    if (rule->anchor && (ruleset != &pf_main_ruleset) &&
        ((strcmp(ruleset->anchor->owner, "")) == 0) &&
        ((ruleset->rules[i].active.rcount - 1) == 0)) {
        /* set rule & ruleset to parent and repeat */
        struct pf_rule *delete_rule = rule;
        struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset ruleset->anchor->parent->ruleset
        if (ruleset->anchor->parent == NULL)
            ruleset = &pf_main_ruleset;
        else
            ruleset = &parent_ruleset;

        rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
        while (rule &&
            (rule->anchor != delete_ruleset->anchor))
            rule = TAILQ_NEXT(rule, entries);
        if (rule == NULL)
            panic("%s: rule not found!", __func__);

        /*
         * if the request device != the rule's device, bail:
         * with error if ticket matches;
         * without error if ticket doesn't match (i.e. it's just cleanup)
         */
        if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
            if (rule->ticket != pr->rule.ticket)
                return (0);
            else
                return (EACCES);
        }

        if (delete_rule->rule_flag & PFRULE_PFM)
            pffwrules--;

        pf_delete_rule_from_ruleset(delete_ruleset,
            i, delete_rule);
        delete_ruleset->rules[i].active.ticket =
            ++delete_ruleset->rules[i].inactive.ticket;
        goto delete_rule;
    } else {
        /*
         * process deleting rule only if device that added the
         * rule matches device that issued the request
         */
        if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
            return (EACCES);
        if (rule->rule_flag & PFRULE_PFM)
            pffwrules--;
        pf_delete_rule_from_ruleset(ruleset, i,
            rule);
        pf_ruleset_cleanup(ruleset, i);
    }

    return (0);
}
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule, *next;
    int deleted = 0;

    for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
        rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
        ruleset = &pf_main_ruleset;
        while (rule) {
            next = TAILQ_NEXT(rule, entries);
            /*
             * process deleting rule only if device that added the
             * rule matches device that issued the request
             */
            if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
                rule = next;
                continue;
            }
            if (rule->anchor) {
                if (((strcmp(rule->owner, owner)) == 0) ||
                    ((strcmp(rule->owner, "")) == 0)) {
                    if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
                        if (deleted) {
                            pf_ruleset_cleanup(ruleset, rs);
                            deleted = 0;
                        }
                        /* step into anchor */
                        ruleset =
                            &rule->anchor->ruleset;
                        rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
                        continue;
                    } else {
                        if (rule->rule_flag &
                            PFRULE_PFM)
                            pffwrules--;
                        pf_delete_rule_from_ruleset(ruleset, rs, rule);
                        deleted = 1;
                        rule = next;
                    }
                } else
                    rule = next;
            } else {
                if (((strcmp(rule->owner, owner)) == 0)) {
                    /* delete rule */
                    if (rule->rule_flag & PFRULE_PFM)
                        pffwrules--;
                    pf_delete_rule_from_ruleset(ruleset,
                        rs, rule);
                    deleted = 1;
                }
                rule = next;
            }
            if (rule == NULL && deleted) {
                pf_ruleset_cleanup(ruleset, rs);
                deleted = 0;
            }
            if (rule == NULL && ruleset != &pf_main_ruleset)
                pf_deleterule_anchor_step_out(&ruleset,
                    rs, &rule);
        }
    }
}
static void
pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
    int rs, struct pf_rule **rule_ptr)
{
    struct pf_ruleset *ruleset = *ruleset_ptr;
    struct pf_rule *rule = *rule_ptr;

    /* step out of anchor */
    struct pf_ruleset *rs_copy = ruleset;
    ruleset = ruleset->anchor->parent ?
        &ruleset->anchor->parent->ruleset : &pf_main_ruleset;

    rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
    while (rule && (rule->anchor != rs_copy->anchor))
        rule = TAILQ_NEXT(rule, entries);
    if (rule == NULL)
        panic("%s: parent rule of anchor not found!", __func__);
    if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
        rule = TAILQ_NEXT(rule, entries);

    *ruleset_ptr = ruleset;
    *rule_ptr = rule;
}
static void
pf_addrwrap_setup(struct pf_addr_wrap *aw)
{
    VERIFY(aw);
    bzero(&aw->p, sizeof aw->p);
}
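
/*
 * Resolve and validate everything a freshly copied-in rule refers to:
 * interface, queue IDs, tags, addresses, anchor and overload table.
 * Consumes the rule on failure and returns a non-zero errno.
 */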
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
    struct pf_pooladdr *apa;
    int error = 0;

    if (rule->ifname[0]) {
        rule->kif = pfi_kif_get(rule->ifname);
        if (rule->kif == NULL) {
            pool_put(&pf_rule_pl, rule);
            return (EINVAL);
        }
        pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
    }
#if PF_ALTQ
    /* set queue IDs */
    if (altq_allowed && rule->qname[0] != '\0') {
        if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
            error = EBUSY;
        else if (rule->pqname[0] != '\0') {
            if ((rule->pqid =
                pf_qname2qid(rule->pqname)) == 0)
                error = EBUSY;
        } else
            rule->pqid = rule->qid;
    }
#endif /* PF_ALTQ */
    if (rule->tagname[0])
        if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
            error = EBUSY;
    if (rule->match_tagname[0])
        if ((rule->match_tag =
            pf_tagname2tag(rule->match_tagname)) == 0)
            error = EBUSY;
    if (rule->rt && !rule->direction)
        error = EINVAL;
    if (!rule->log)
        rule->logif = 0;
    if (rule->logif >= PFLOGIFS_MAX)
        error = EINVAL;
    pf_addrwrap_setup(&rule->src.addr);
    pf_addrwrap_setup(&rule->dst.addr);
    if (pf_rtlabel_add(&rule->src.addr) ||
        pf_rtlabel_add(&rule->dst.addr))
        error = EBUSY;
    if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
        error = EINVAL;
    if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
        error = EINVAL;
    if (pf_tbladdr_setup(ruleset, &rule->src.addr))
        error = EINVAL;
    if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
        error = EINVAL;
    if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
        error = EINVAL;
    TAILQ_FOREACH(apa, &pf_pabuf, entries)
        if (pf_tbladdr_setup(ruleset, &apa->addr))
            error = EINVAL;

    if (rule->overload_tblname[0]) {
        if ((rule->overload_tbl = pfr_attach_table(ruleset,
            rule->overload_tblname)) == NULL)
            error = EINVAL;
        else
            rule->overload_tbl->pfrkt_flags |=
                PFR_TFLAG_ACTIVE;
    }

    pf_mv_pool(&pf_pabuf, &rule->rpool.list);

    if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
        (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
        rule->anchor == NULL) ||
        (rule->rt > PF_FASTROUTE)) &&
        (TAILQ_FIRST(&rule->rpool.list) == NULL))
        error = EINVAL;

    if (error) {
        pf_rm_rule(NULL, rule);
        return (error);
    }
    /* For a NAT64 rule the rule's address family is AF_INET6 whereas
     * the address pool's family will be AF_INET
     */
    rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET : rule->af;
    rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
    rule->evaluations = rule->packets[0] = rule->packets[1] =
        rule->bytes[0] = rule->bytes[1] = 0;

    return (0);
}
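
/*
 * Rule ioctls: add, list, fetch, change, insert and delete rules.
 * minordev identifies the control device the request came through, which
 * matters for rules flagged PFRULE_PFM.
 */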
static int
pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
{
    int error = 0;
    u_int32_t req_dev = 0;

    switch (cmd) {
    case DIOCADDRULE: {
        struct pf_ruleset *ruleset;
        struct pf_rule *rule, *tail;
        int rs_num;

        pr->anchor[sizeof (pr->anchor) - 1] = '\0';
        pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
            error = EINVAL;
            break;
        }
        if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
            error = EBUSY;
            break;
        }
        if (pr->pool_ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }
        rule = pool_get(&pf_rule_pl, PR_WAITOK);
        if (rule == NULL) {
            error = ENOMEM;
            break;
        }
        pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
        if (rule->af == AF_INET) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET */
#if !INET6
        if (rule->af == AF_INET6) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET6 */
        tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
            pf_rulequeue);
        if (tail)
            rule->nr = tail->nr + 1;
        else
            rule->nr = 0;

        if ((error = pf_rule_setup(pr, rule, ruleset)))
            break;

        TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
            rule, entries);
        ruleset->rules[rs_num].inactive.rcount++;
        if (rule->rule_flag & PFRULE_PFM)
            pffwrules++;

        if (rule->action == PF_NAT64)
            atomic_add_16(&pf_nat64_configured, 1);
        break;
    }

    case DIOCGETRULES: {
        struct pf_ruleset *ruleset;
        struct pf_rule *tail;
        int rs_num;

        pr->anchor[sizeof (pr->anchor) - 1] = '\0';
        pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
            pf_rulequeue);
        if (tail)
            pr->nr = tail->nr + 1;
        else
            pr->nr = 0;
        pr->ticket = ruleset->rules[rs_num].active.ticket;
        break;
    }

    case DIOCGETRULE: {
        struct pf_ruleset *ruleset;
        struct pf_rule *rule;
        int rs_num, i;

        pr->anchor[sizeof (pr->anchor) - 1] = '\0';
        pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
        ruleset = pf_find_ruleset(pr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
            error = EBUSY;
            break;
        }
        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        while ((rule != NULL) && (rule->nr != pr->nr))
            rule = TAILQ_NEXT(rule, entries);
        if (rule == NULL) {
            error = EBUSY;
            break;
        }
        pf_rule_copyout(rule, &pr->rule);
        if (pf_anchor_copyout(ruleset, rule, pr)) {
            error = EBUSY;
            break;
        }
        pfi_dynaddr_copyout(&pr->rule.src.addr);
        pfi_dynaddr_copyout(&pr->rule.dst.addr);
        pf_tbladdr_copyout(&pr->rule.src.addr);
        pf_tbladdr_copyout(&pr->rule.dst.addr);
        pf_rtlabel_copyout(&pr->rule.src.addr);
        pf_rtlabel_copyout(&pr->rule.dst.addr);
        for (i = 0; i < PF_SKIP_COUNT; ++i)
            if (rule->skip[i].ptr == NULL)
                pr->rule.skip[i].nr = -1;
            else
                pr->rule.skip[i].nr =
                    rule->skip[i].ptr->nr;

        if (pr->action == PF_GET_CLR_CNTR) {
            rule->evaluations = 0;
            rule->packets[0] = rule->packets[1] = 0;
            rule->bytes[0] = rule->bytes[1] = 0;
        }
        break;
    }

    case DIOCCHANGERULE: {
        struct pfioc_rule *pcr = pr;
        struct pf_ruleset *ruleset;
        struct pf_rule *oldrule = NULL, *newrule = NULL;
        struct pf_pooladdr *pa;
        u_int32_t nr = 0;
        int rs_num;

        if (!(pcr->action == PF_CHANGE_REMOVE ||
            pcr->action == PF_CHANGE_GET_TICKET) &&
            pcr->pool_ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }

        if (pcr->action < PF_CHANGE_ADD_HEAD ||
            pcr->action > PF_CHANGE_GET_TICKET) {
            error = EINVAL;
            break;
        }
        pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
        pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
        ruleset = pf_find_ruleset(pcr->anchor);
        if (ruleset == NULL) {
            error = EINVAL;
            break;
        }
        rs_num = pf_get_ruleset_number(pcr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }

        if (pcr->action == PF_CHANGE_GET_TICKET) {
            pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
            break;
        } else {
            if (pcr->ticket !=
                ruleset->rules[rs_num].active.ticket) {
                error = EINVAL;
                break;
            }
            if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
                error = EINVAL;
                break;
            }
        }

        if (pcr->action != PF_CHANGE_REMOVE) {
            newrule = pool_get(&pf_rule_pl, PR_WAITOK);
            if (newrule == NULL) {
                error = ENOMEM;
                break;
            }
            pf_rule_copyin(&pcr->rule, newrule, p, minordev);
#if !INET
            if (newrule->af == AF_INET) {
                pool_put(&pf_rule_pl, newrule);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET */
#if !INET6
            if (newrule->af == AF_INET6) {
                pool_put(&pf_rule_pl, newrule);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET6 */
            if (newrule->ifname[0]) {
                newrule->kif = pfi_kif_get(newrule->ifname);
                if (newrule->kif == NULL) {
                    pool_put(&pf_rule_pl, newrule);
                    error = EINVAL;
                    break;
                }
                pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
            } else
                newrule->kif = NULL;

#if PF_ALTQ
            /* set queue IDs */
            if (altq_allowed && newrule->qname[0] != '\0') {
                if ((newrule->qid =
                    pf_qname2qid(newrule->qname)) == 0)
                    error = EBUSY;
                else if (newrule->pqname[0] != '\0') {
                    if ((newrule->pqid =
                        pf_qname2qid(newrule->pqname)) == 0)
                        error = EBUSY;
                } else
                    newrule->pqid = newrule->qid;
            }
#endif /* PF_ALTQ */
            if (newrule->tagname[0])
                if ((newrule->tag =
                    pf_tagname2tag(newrule->tagname)) == 0)
                    error = EBUSY;
            if (newrule->match_tagname[0])
                if ((newrule->match_tag = pf_tagname2tag(
                    newrule->match_tagname)) == 0)
                    error = EBUSY;
            if (newrule->rt && !newrule->direction)
                error = EINVAL;
            if (!newrule->log)
                newrule->logif = 0;
            if (newrule->logif >= PFLOGIFS_MAX)
                error = EINVAL;
            pf_addrwrap_setup(&newrule->src.addr);
            pf_addrwrap_setup(&newrule->dst.addr);
            if (pf_rtlabel_add(&newrule->src.addr) ||
                pf_rtlabel_add(&newrule->dst.addr))
                error = EBUSY;
            if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
                error = EINVAL;
            if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
                error = EINVAL;
            if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
                error = EINVAL;
            if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
                error = EINVAL;
            if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
                error = EINVAL;
            TAILQ_FOREACH(pa, &pf_pabuf, entries)
                if (pf_tbladdr_setup(ruleset, &pa->addr))
                    error = EINVAL;

            if (newrule->overload_tblname[0]) {
                if ((newrule->overload_tbl = pfr_attach_table(
                    ruleset, newrule->overload_tblname)) ==
                    NULL)
                    error = EINVAL;
                else
                    newrule->overload_tbl->pfrkt_flags |=
                        PFR_TFLAG_ACTIVE;
            }

            pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
            if (((((newrule->action == PF_NAT) ||
                (newrule->action == PF_RDR) ||
                (newrule->action == PF_BINAT) ||
                (newrule->rt > PF_FASTROUTE)) &&
                !newrule->anchor)) &&
                (TAILQ_FIRST(&newrule->rpool.list) == NULL))
                error = EINVAL;

            if (error) {
                pf_rm_rule(NULL, newrule);
                break;
            }
            newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
            newrule->evaluations = 0;
            newrule->packets[0] = newrule->packets[1] = 0;
            newrule->bytes[0] = newrule->bytes[1] = 0;
        }
        pf_empty_pool(&pf_pabuf);

        if (pcr->action == PF_CHANGE_ADD_HEAD)
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
        else if (pcr->action == PF_CHANGE_ADD_TAIL)
            oldrule = TAILQ_LAST(
                ruleset->rules[rs_num].active.ptr, pf_rulequeue);
        else {
            oldrule = TAILQ_FIRST(
                ruleset->rules[rs_num].active.ptr);
            while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
                oldrule = TAILQ_NEXT(oldrule, entries);
            if (oldrule == NULL) {
                if (newrule != NULL)
                    pf_rm_rule(NULL, newrule);
                error = EINVAL;
                break;
            }
        }

        if (pcr->action == PF_CHANGE_REMOVE) {
            pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
            ruleset->rules[rs_num].active.rcount--;
        } else {
            if (oldrule == NULL)
                TAILQ_INSERT_TAIL(
                    ruleset->rules[rs_num].active.ptr,
                    newrule, entries);
            else if (pcr->action == PF_CHANGE_ADD_HEAD ||
                pcr->action == PF_CHANGE_ADD_BEFORE)
                TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
            else
                TAILQ_INSERT_AFTER(
                    ruleset->rules[rs_num].active.ptr,
                    oldrule, newrule, entries);
            ruleset->rules[rs_num].active.rcount++;
        }

        nr = 0;
        TAILQ_FOREACH(oldrule,
            ruleset->rules[rs_num].active.ptr, entries)
            oldrule->nr = nr++;

        ruleset->rules[rs_num].active.ticket++;

        pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
        pf_remove_if_empty_ruleset(ruleset);

        break;
    }

    case DIOCINSERTRULE: {
        struct pf_ruleset *ruleset;
        struct pf_rule *rule, *tail, *r;
        int rs_num;
        int is_anchor;

        pr->anchor[sizeof (pr->anchor) - 1] = '\0';
        pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
        is_anchor = (pr->anchor_call[0] != '\0');

        if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
            pr->rule.owner, is_anchor, &error)) == NULL)
            break;

        rs_num = pf_get_ruleset_number(pr->rule.action);
        if (rs_num >= PF_RULESET_MAX) {
            error = EINVAL;
            break;
        }
        if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
            error = EINVAL;
            break;
        }

        /* make sure this anchor rule doesn't exist already */
        if (is_anchor) {
            r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
            while (r) {
                if (r->anchor &&
                    ((strcmp(r->anchor->name,
                    pr->anchor_call)) == 0)) {
                    if (((strcmp(pr->rule.owner,
                        r->owner)) == 0) ||
                        ((strcmp(r->owner, "")) == 0))
                        error = EEXIST;
                    else
                        error = EPERM;
                    break;
                }
                r = TAILQ_NEXT(r, entries);
            }
            if (error != 0)
                break;
        }

        rule = pool_get(&pf_rule_pl, PR_WAITOK);
        if (rule == NULL) {
            error = ENOMEM;
            break;
        }
        pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
        if (rule->af == AF_INET) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET */
#if !INET6
        if (rule->af == AF_INET6) {
            pool_put(&pf_rule_pl, rule);
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET6 */
        r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
        while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
            r = TAILQ_NEXT(r, entries);

        if (r == NULL) {
            if ((tail =
                TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                pf_rulequeue)) != NULL)
                rule->nr = tail->nr + 1;
            else
                rule->nr = 0;
        } else {
            rule->nr = r->nr;
        }

        if ((error = pf_rule_setup(pr, rule, ruleset)))
            break;

        if (rule->anchor != NULL)
            strlcpy(rule->anchor->owner, rule->owner,
                PF_OWNER_NAME_SIZE);

        if (r) {
            TAILQ_INSERT_BEFORE(r, rule, entries);
            while (r && ++r->nr)
                r = TAILQ_NEXT(r, entries);
        } else
            TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
                rule, entries);
        ruleset->rules[rs_num].active.rcount++;

        /* Calculate checksum for the main ruleset */
        if (ruleset == &pf_main_ruleset)
            error = pf_setup_pfsync_matching(ruleset);

        pf_ruleset_cleanup(ruleset, rs_num);
        rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);

        pr->rule.ticket = rule->ticket;
        pf_rule_copyout(rule, &pr->rule);
        if (rule->rule_flag & PFRULE_PFM)
            pffwrules++;
        if (rule->action == PF_NAT64)
            atomic_add_16(&pf_nat64_configured, 1);
        break;
    }

    case DIOCDELETERULE: {
        pr->anchor[sizeof (pr->anchor) - 1] = '\0';
        pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';

        if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
            error = EINVAL;
            break;
        }

        /* get device through which request is made */
        if ((uint8_t)minordev == PFDEV_PFM)
            req_dev |= PFRULE_PFM;

        if (pr->rule.ticket) {
            if ((error = pf_delete_rule_by_ticket(pr, req_dev)))
                break;
        } else
            pf_delete_rule_by_owner(pr->rule.owner, req_dev);

        if (pr->rule.action == PF_NAT64)
            atomic_add_16(&pf_nat64_configured, -1);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
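
/*
 * State-kill ioctls: DIOCCLRSTATES unlinks every state whose metadata
 * matches the optional ifname/owner filters; DIOCKILLSTATES additionally
 * matches on address family, protocol, addresses and ports.
 */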
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
    psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

    bool ifname_matched = true;
    bool owner_matched = true;

    switch (cmd) {
    case DIOCCLRSTATES: {
        struct pf_state *s, *nexts;
        int killed = 0;

        for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
            /*
             * Purge all states only when neither ifname
             * nor owner is provided. If either is provided
             * we purge only the states whose metadata matches.
             */
            bool unlink_state = false;
            ifname_matched = true;
            owner_matched = true;

            if (psk->psk_ifname[0] &&
                strcmp(psk->psk_ifname, s->kif->pfik_name)) {
                ifname_matched = false;
            }

            if (psk->psk_ownername[0] &&
                ((NULL == s->rule.ptr) ||
                strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
                owner_matched = false;
            }

            unlink_state = ifname_matched && owner_matched;

            if (unlink_state) {
#if NPFSYNC
                /* don't send out individual delete messages */
                s->sync_flags = PFSTATE_NOSYNC;
#endif
                pf_unlink_state(s);
                killed++;
            }
        }
        psk->psk_af = killed;
#if NPFSYNC
        pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
        break;
    }

    case DIOCKILLSTATES: {
        struct pf_state *s, *nexts;
        struct pf_state_key *sk;
        struct pf_state_host *src, *dst;
        int killed = 0;

        for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
            s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
            sk = s->state_key;
            ifname_matched = true;
            owner_matched = true;

            if (psk->psk_ifname[0] &&
                strcmp(psk->psk_ifname, s->kif->pfik_name)) {
                ifname_matched = false;
            }

            if (psk->psk_ownername[0] &&
                ((NULL == s->rule.ptr) ||
                strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
                owner_matched = false;
            }

            if (sk->direction == PF_OUT) {
                src = &sk->lan;
                dst = &sk->ext_lan;
            } else {
                src = &sk->ext_lan;
                dst = &sk->lan;
            }
            if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
                (!psk->psk_proto || psk->psk_proto == sk->proto) &&
                PF_MATCHA(psk->psk_src.neg,
                &psk->psk_src.addr.v.a.addr,
                &psk->psk_src.addr.v.a.mask,
                &src->addr, sk->af_lan) &&
                PF_MATCHA(psk->psk_dst.neg,
                &psk->psk_dst.addr.v.a.addr,
                &psk->psk_dst.addr.v.a.mask,
                &dst->addr, sk->af_lan) &&
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_src.xport,
                &src->xport)) &&
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_dst.xport,
                &dst->xport)) &&
                ifname_matched &&
                owner_matched) {
#if NPFSYNC
                /* send immediate delete of state */
                pfsync_delete_state(s);
                s->sync_flags |= PFSTATE_NOSYNC;
#endif
                pf_unlink_state(s);
                killed++;
            }
        }
        psk->psk_af = killed;
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCADDSTATE: {
        struct pfsync_state *sp = &ps->state;
        struct pf_state *s;
        struct pf_state_key *sk;
        struct pfi_kif *kif;

        if (sp->timeout >= PFTM_MAX) {
            error = EINVAL;
            break;
        }
        s = pool_get(&pf_state_pl, PR_WAITOK);
        if (s == NULL) {
            error = ENOMEM;
            break;
        }
        bzero(s, sizeof (struct pf_state));
        if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
            pool_put(&pf_state_pl, s);
            error = ENOMEM;
            break;
        }
        pf_state_import(sp, sk, s);
        kif = pfi_kif_get(sp->ifname);
        if (kif == NULL) {
            pool_put(&pf_state_pl, s);
            pool_put(&pf_state_key_pl, sk);
            error = ENOENT;
            break;
        }
        TAILQ_INIT(&s->unlink_hooks);
        s->state_key->app_state = 0;
        if (pf_insert_state(kif, s)) {
            pfi_kif_unref(kif, PFI_KIF_REF_NONE);
            pool_put(&pf_state_pl, s);
            error = EEXIST;
            break;
        }
        pf_default_rule.states++;
        VERIFY(pf_default_rule.states != 0);
        break;
    }

    case DIOCGETSTATE: {
        struct pf_state *s;
        struct pf_state_cmp id_key;

        bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
        id_key.creatorid = ps->state.creatorid;

        s = pf_find_state_byid(&id_key);
        if (s == NULL) {
            error = ENOENT;
            break;
        }

        pf_state_export(&ps->state, s->state_key, s);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
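
/*
 * DIOCGETSTATES follows the usual two-call pattern: a first call with
 * ps_len == 0 reports the required buffer size, a second call with an
 * allocated buffer retrieves the entries; ps_len is rewritten on the
 * way out to the number of bytes actually copied.  Illustrative
 * user-space sketch (not part of the original source):
 *
 *    struct pfioc_states ps;
 *
 *    bzero(&ps, sizeof (ps));
 *    ioctl(dev, DIOCGETSTATES, &ps);       (* probe: fills ps.ps_len *)
 *    ps.ps_buf = malloc(ps.ps_len);
 *    ioctl(dev, DIOCGETSTATES, &ps);       (* fetch: fills the buffer *)
 */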
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
    int p64 = proc_is64bit(p);
    int error = 0;

    switch (cmd) {
    case DIOCGETSTATES: {       /* struct pfioc_states */
        struct pf_state *state;
        struct pfsync_state *pstore;
        user_addr_t buf;
        int nr = 0;
        int len, size;

        len = (p64 ? ps64->ps_len : ps32->ps_len);
        if (len == 0) {
            size = sizeof (struct pfsync_state) * pf_status.states;
            if (p64)
                ps64->ps_len = size;
            else
                ps32->ps_len = size;
            break;
        }

        pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
        if (pstore == NULL) {
            error = ENOMEM;
            break;
        }
        buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

        state = TAILQ_FIRST(&state_list);
        while (state) {
            if (state->timeout != PFTM_UNLINKED) {
                if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
                    break;

                pf_state_export(pstore,
                    state->state_key, state);
                error = copyout(pstore, buf, sizeof (*pstore));
                if (error) {
                    _FREE(pstore, M_TEMP);
                    goto fail;
                }
                buf += sizeof (*pstore);
                nr++;
            }
            state = TAILQ_NEXT(state, entry_list);
        }

        size = sizeof (struct pfsync_state) * nr;
        if (p64)
            ps64->ps_len = size;
        else
            ps32->ps_len = size;

        _FREE(pstore, M_TEMP);
        break;
    }

    default:
        error = ENODEV;
        break;
    }
fail:
    return (error);
}
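
/*
 * DIOCNATLOOK: given the endpoints of an active connection, report the
 * translated endpoints recorded in the state table.  Illustrative
 * user-space sketch (not part of the original source); assumes a
 * descriptor obtained by opening /dev/pf:
 *
 *    struct pfioc_natlook pnl;
 *
 *    bzero(&pnl, sizeof (pnl));
 *    pnl.af = AF_INET;
 *    pnl.proto = IPPROTO_TCP;
 *    pnl.direction = PF_OUT;
 *    pnl.saddr.v4 = ...;  pnl.sxport.port = ...;
 *    pnl.daddr.v4 = ...;  pnl.dxport.port = ...;
 *    if (ioctl(dev, DIOCNATLOOK, &pnl) == 0)
 *        (* translated endpoints are in pnl.rsaddr/rsxport etc. *)
 */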
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCNATLOOK: {
        struct pf_state_key *sk;
        struct pf_state *state;
        struct pf_state_key_cmp key;
        int m = 0, direction = pnl->direction;

        key.proto = pnl->proto;
        key.proto_variant = pnl->proto_variant;

        if (!pnl->proto ||
            PF_AZERO(&pnl->saddr, pnl->af) ||
            PF_AZERO(&pnl->daddr, pnl->af) ||
            ((pnl->proto == IPPROTO_TCP ||
            pnl->proto == IPPROTO_UDP) &&
            (!pnl->dxport.port || !pnl->sxport.port)))
            error = EINVAL;
        else {
            /*
             * userland gives us source and dest of connection,
             * reverse the lookup so we ask for what happens with
             * the return traffic, enabling us to find it in the
             * state tree.
             */
            if (direction == PF_IN) {
                key.af_gwy = pnl->af;
                PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
                    pnl->af);
                memcpy(&key.ext_gwy.xport, &pnl->dxport,
                    sizeof (key.ext_gwy.xport));
                PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
                memcpy(&key.gwy.xport, &pnl->sxport,
                    sizeof (key.gwy.xport));
                state = pf_find_state_all(&key, PF_IN, &m);
            } else {
                key.af_lan = pnl->af;
                PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
                memcpy(&key.lan.xport, &pnl->dxport,
                    sizeof (key.lan.xport));
                PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
                    pnl->af);
                memcpy(&key.ext_lan.xport, &pnl->sxport,
                    sizeof (key.ext_lan.xport));
                state = pf_find_state_all(&key, PF_OUT, &m);
            }
            if (m > 1)
                error = E2BIG;    /* more than one state */
            else if (state != NULL) {
                sk = state->state_key;
                if (direction == PF_IN) {
                    PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
                        sk->af_lan);
                    memcpy(&pnl->rsxport, &sk->lan.xport,
                        sizeof (pnl->rsxport));
                    PF_ACPY(&pnl->rdaddr, &pnl->daddr,
                        pnl->af);
                    memcpy(&pnl->rdxport, &pnl->dxport,
                        sizeof (pnl->rdxport));
                } else {
                    PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
                        sk->af_gwy);
                    memcpy(&pnl->rdxport, &sk->gwy.xport,
                        sizeof (pnl->rdxport));
                    PF_ACPY(&pnl->rsaddr, &pnl->saddr,
                        pnl->af);
                    memcpy(&pnl->rsxport, &pnl->sxport,
                        sizeof (pnl->rsxport));
                }
            } else
                error = ENOENT;
        }
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
static int
pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCSETTIMEOUT: {
        int old;

        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
            pt->seconds < 0) {
            error = EINVAL;
            break;
        }
        old = pf_default_rule.timeout[pt->timeout];
        if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
            pt->seconds = 1;
        pf_default_rule.timeout[pt->timeout] = pt->seconds;
        if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
            wakeup(pf_purge_thread_fn);
        pt->seconds = old;
        break;
    }

    case DIOCGETTIMEOUT: {
        if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
            error = EINVAL;
            break;
        }
        pt->seconds = pf_default_rule.timeout[pt->timeout];
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
static int
pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCGETLIMIT: {
        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
            error = EINVAL;
            break;
        }
        pl->limit = pf_pool_limits[pl->index].limit;
        break;
    }

    case DIOCSETLIMIT: {
        int old_limit;

        if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
            pf_pool_limits[pl->index].pp == NULL) {
            error = EINVAL;
            break;
        }
        pool_sethardlimit(pf_pool_limits[pl->index].pp,
            pl->limit, NULL, 0);
        old_limit = pf_pool_limits[pl->index].limit;
        pf_pool_limits[pl->index].limit = pl->limit;
        pl->limit = old_limit;
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
    struct pf_pooladdr *pa = NULL;
    struct pf_pool *pool = NULL;
    int error = 0;

    switch (cmd) {
    case DIOCBEGINADDRS: {
        pf_empty_pool(&pf_pabuf);
        pp->ticket = ++ticket_pabuf;
        break;
    }

    case DIOCADDADDR: {
        pp->anchor[sizeof (pp->anchor) - 1] = '\0';
        if (pp->ticket != ticket_pabuf) {
            error = EBUSY;
            break;
        }
#if !INET
        if (pp->af == AF_INET) {
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET */
#if !INET6
        if (pp->af == AF_INET6) {
            error = EAFNOSUPPORT;
            break;
        }
#endif /* INET6 */
        if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
            pp->addr.addr.type != PF_ADDR_DYNIFTL &&
            pp->addr.addr.type != PF_ADDR_TABLE) {
            error = EINVAL;
            break;
        }
        pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
        if (pa == NULL) {
            error = ENOMEM;
            break;
        }
        pf_pooladdr_copyin(&pp->addr, pa);
        if (pa->ifname[0]) {
            pa->kif = pfi_kif_get(pa->ifname);
            if (pa->kif == NULL) {
                pool_put(&pf_pooladdr_pl, pa);
                error = EINVAL;
                break;
            }
            pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
        }
        pf_addrwrap_setup(&pa->addr);
        if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
            pfi_dynaddr_remove(&pa->addr);
            pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
            pool_put(&pf_pooladdr_pl, pa);
            error = EINVAL;
            break;
        }
        TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
        break;
    }

    case DIOCGETADDRS: {
        pp->nr = 0;
        pp->anchor[sizeof (pp->anchor) - 1] = '\0';
        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 0);
        if (pool == NULL) {
            error = EBUSY;
            break;
        }
        TAILQ_FOREACH(pa, &pool->list, entries)
            pp->nr++;
        break;
    }

    case DIOCGETADDR: {
        u_int32_t nr = 0;

        pp->anchor[sizeof (pp->anchor) - 1] = '\0';
        pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
            pp->r_num, 0, 1, 1);
        if (pool == NULL) {
            error = EBUSY;
            break;
        }
        pa = TAILQ_FIRST(&pool->list);
        while ((pa != NULL) && (nr < pp->nr)) {
            pa = TAILQ_NEXT(pa, entries);
            nr++;
        }
        if (pa == NULL) {
            error = EBUSY;
            break;
        }
        pf_pooladdr_copyout(pa, &pp->addr);
        pfi_dynaddr_copyout(&pp->addr.addr);
        pf_tbladdr_copyout(&pp->addr.addr);
        pf_rtlabel_copyout(&pp->addr.addr);
        break;
    }

    case DIOCCHANGEADDR: {
        struct pfioc_pooladdr *pca = pp;
        struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
        struct pf_ruleset *ruleset;

        if (pca->action < PF_CHANGE_ADD_HEAD ||
            pca->action > PF_CHANGE_REMOVE) {
            error = EINVAL;
            break;
        }
        if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
            pca->addr.addr.type != PF_ADDR_DYNIFTL &&
            pca->addr.addr.type != PF_ADDR_TABLE) {
            error = EINVAL;
            break;
        }

        pca->anchor[sizeof (pca->anchor) - 1] = '\0';
        ruleset = pf_find_ruleset(pca->anchor);
        if (ruleset == NULL) {
            error = EBUSY;
            break;
        }
        pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
            pca->r_num, pca->r_last, 1, 1);
        if (pool == NULL) {
            error = EBUSY;
            break;
        }
        if (pca->action != PF_CHANGE_REMOVE) {
            newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
            if (newpa == NULL) {
                error = ENOMEM;
                break;
            }
            pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
            if (pca->af == AF_INET) {
                pool_put(&pf_pooladdr_pl, newpa);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET */
#if !INET6
            if (pca->af == AF_INET6) {
                pool_put(&pf_pooladdr_pl, newpa);
                error = EAFNOSUPPORT;
                break;
            }
#endif /* INET6 */
            if (newpa->ifname[0]) {
                newpa->kif = pfi_kif_get(newpa->ifname);
                if (newpa->kif == NULL) {
                    pool_put(&pf_pooladdr_pl, newpa);
                    error = EINVAL;
                    break;
                }
                pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
            } else
                newpa->kif = NULL;
            pf_addrwrap_setup(&newpa->addr);
            if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
                pf_tbladdr_setup(ruleset, &newpa->addr)) {
                pfi_dynaddr_remove(&newpa->addr);
                pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
                pool_put(&pf_pooladdr_pl, newpa);
                error = EINVAL;
                break;
            }
        }

        if (pca->action == PF_CHANGE_ADD_HEAD)
            oldpa = TAILQ_FIRST(&pool->list);
        else if (pca->action == PF_CHANGE_ADD_TAIL)
            oldpa = TAILQ_LAST(&pool->list, pf_palist);
        else {
            int i = 0;

            oldpa = TAILQ_FIRST(&pool->list);
            while ((oldpa != NULL) && (i < (int)pca->nr)) {
                oldpa = TAILQ_NEXT(oldpa, entries);
                i++;
            }
            if (oldpa == NULL) {
                error = EINVAL;
                break;
            }
        }

        if (pca->action == PF_CHANGE_REMOVE) {
            TAILQ_REMOVE(&pool->list, oldpa, entries);
            pfi_dynaddr_remove(&oldpa->addr);
            pf_tbladdr_remove(&oldpa->addr);
            pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
            pool_put(&pf_pooladdr_pl, oldpa);
        } else {
            if (oldpa == NULL)
                TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
            else if (pca->action == PF_CHANGE_ADD_HEAD ||
                pca->action == PF_CHANGE_ADD_BEFORE)
                TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
            else
                TAILQ_INSERT_AFTER(&pool->list, oldpa,
                    newpa, entries);
        }

        pool->cur = TAILQ_FIRST(&pool->list);
        PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
            pca->af);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
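
/*
 * Ruleset (anchor) enumeration: DIOCGETRULESETS counts the children of
 * an anchor path, DIOCGETRULESET returns the name of the nr-th child.
 */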
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCGETRULESETS: {
        struct pf_ruleset *ruleset;
        struct pf_anchor *anchor;

        pr->path[sizeof (pr->path) - 1] = '\0';
        pr->name[sizeof (pr->name) - 1] = '\0';
        if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
            error = EINVAL;
            break;
        }
        pr->nr = 0;
        if (ruleset->anchor == NULL) {
            /* XXX kludge for pf_main_ruleset */
            RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
                if (anchor->parent == NULL)
                    pr->nr++;
        } else {
            RB_FOREACH(anchor, pf_anchor_node,
                &ruleset->anchor->children)
                pr->nr++;
        }
        break;
    }

    case DIOCGETRULESET: {
        struct pf_ruleset *ruleset;
        struct pf_anchor *anchor;
        u_int32_t nr = 0;

        pr->path[sizeof (pr->path) - 1] = '\0';
        if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
            error = EINVAL;
            break;
        }
        pr->name[0] = 0;
        if (ruleset->anchor == NULL) {
            /* XXX kludge for pf_main_ruleset */
            RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
                if (anchor->parent == NULL && nr++ == pr->nr) {
                    strlcpy(pr->name, anchor->name,
                        sizeof (pr->name));
                    break;
                }
        } else {
            RB_FOREACH(anchor, pf_anchor_node,
                &ruleset->anchor->children)
                if (nr++ == pr->nr) {
                    strlcpy(pr->name, anchor->name,
                        sizeof (pr->name));
                    break;
                }
        }
        if (!pr->name[0])
            error = EBUSY;
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
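
/*
 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement two-phase loading of
 * rulesets: user space first opens a transaction on each ruleset or
 * table it wants to replace and receives a ticket per element, then
 * either commits all elements or rolls them back.  The commit path
 * validates every ticket before touching anything, so a stale ticket
 * fails the whole transaction rather than half of it.
 */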
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
    int p64 = proc_is64bit(p);
    int error = 0, esize, size;
    user_addr_t buf;
    int i;

    esize = (p64 ? io64->esize : io32->esize);
    size = (p64 ? io64->size : io32->size);
    buf = (p64 ? io64->array : io32->array);

    switch (cmd) {
    case DIOCXBEGIN: {
        struct pfioc_trans_e *ioe;
        struct pfr_table *table;

        if (esize != sizeof (*ioe)) {
            error = ENODEV;
            goto fail;
        }
        ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
        table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
        for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
            if (copyin(buf, ioe, sizeof (*ioe))) {
                _FREE(table, M_TEMP);
                _FREE(ioe, M_TEMP);
                error = EFAULT;
                goto fail;
            }
            ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
            switch (ioe->rs_num) {
            case PF_RULESET_ALTQ:
#if PF_ALTQ
                if (altq_allowed) {
                    if (ioe->anchor[0]) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        error = EINVAL;
                        goto fail;
                    }
                    error = pf_begin_altq(&ioe->ticket);
                    if (error != 0) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        goto fail;
                    }
                }
#endif /* PF_ALTQ */
                break;
            case PF_RULESET_TABLE:
                bzero(table, sizeof (*table));
                strlcpy(table->pfrt_anchor, ioe->anchor,
                    sizeof (table->pfrt_anchor));
                if ((error = pfr_ina_begin(table,
                    &ioe->ticket, NULL, 0))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;
                }
                break;
            default:
                if ((error = pf_begin_rules(&ioe->ticket,
                    ioe->rs_num, ioe->anchor))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;
                }
                break;
            }
            if (copyout(ioe, buf, sizeof (*ioe))) {
                _FREE(table, M_TEMP);
                _FREE(ioe, M_TEMP);
                error = EFAULT;
                goto fail;
            }
        }
        _FREE(table, M_TEMP);
        _FREE(ioe, M_TEMP);
        break;
    }

    case DIOCXROLLBACK: {
        struct pfioc_trans_e *ioe;
        struct pfr_table *table;

        if (esize != sizeof (*ioe)) {
            error = ENODEV;
            goto fail;
        }
        ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
        table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
        for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
            if (copyin(buf, ioe, sizeof (*ioe))) {
                _FREE(table, M_TEMP);
                _FREE(ioe, M_TEMP);
                error = EFAULT;
                goto fail;
            }
            ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
            switch (ioe->rs_num) {
            case PF_RULESET_ALTQ:
#if PF_ALTQ
                if (altq_allowed) {
                    if (ioe->anchor[0]) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        error = EINVAL;
                        goto fail;
                    }
                    error = pf_rollback_altq(ioe->ticket);
                    if (error != 0) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        goto fail;    /* really bad */
                    }
                }
#endif /* PF_ALTQ */
                break;
            case PF_RULESET_TABLE:
                bzero(table, sizeof (*table));
                strlcpy(table->pfrt_anchor, ioe->anchor,
                    sizeof (table->pfrt_anchor));
                if ((error = pfr_ina_rollback(table,
                    ioe->ticket, NULL, 0))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;    /* really bad */
                }
                break;
            default:
                if ((error = pf_rollback_rules(ioe->ticket,
                    ioe->rs_num, ioe->anchor))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;    /* really bad */
                }
                break;
            }
        }
        _FREE(table, M_TEMP);
        _FREE(ioe, M_TEMP);
        break;
    }

    case DIOCXCOMMIT: {
        struct pfioc_trans_e *ioe;
        struct pfr_table *table;
        struct pf_ruleset *rs;
        user_addr_t _buf = buf;

        if (esize != sizeof (*ioe)) {
            error = ENODEV;
            goto fail;
        }
        ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
        table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
        /* first makes sure everything will succeed */
        for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
            if (copyin(buf, ioe, sizeof (*ioe))) {
                _FREE(table, M_TEMP);
                _FREE(ioe, M_TEMP);
                error = EFAULT;
                goto fail;
            }
            ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
            switch (ioe->rs_num) {
            case PF_RULESET_ALTQ:
#if PF_ALTQ
                if (altq_allowed) {
                    if (ioe->anchor[0]) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        error = EINVAL;
                        goto fail;
                    }
                    if (!altqs_inactive_open ||
                        ioe->ticket !=
                        ticket_altqs_inactive) {
                        _FREE(table, M_TEMP);
                        _FREE(ioe, M_TEMP);
                        error = EBUSY;
                        goto fail;
                    }
                }
#endif /* PF_ALTQ */
                break;
            case PF_RULESET_TABLE:
                rs = pf_find_ruleset(ioe->anchor);
                if (rs == NULL || !rs->topen || ioe->ticket !=
                    rs->tticket) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    error = EBUSY;
                    goto fail;
                }
                break;
            default:
                if (ioe->rs_num < 0 || ioe->rs_num >=
                    PF_RULESET_MAX) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    error = EINVAL;
                    goto fail;
                }
                rs = pf_find_ruleset(ioe->anchor);
                if (rs == NULL ||
                    !rs->rules[ioe->rs_num].inactive.open ||
                    rs->rules[ioe->rs_num].inactive.ticket !=
                    ioe->ticket) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    error = EBUSY;
                    goto fail;
                }
                break;
            }
        }
        buf = _buf;
        /* now do the commit - no errors should happen here */
        for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
            if (copyin(buf, ioe, sizeof (*ioe))) {
                _FREE(table, M_TEMP);
                _FREE(ioe, M_TEMP);
                error = EFAULT;
                goto fail;
            }
            ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
            switch (ioe->rs_num) {
            case PF_RULESET_ALTQ:
#if PF_ALTQ
                if (altq_allowed &&
                    (error = pf_commit_altq(ioe->ticket))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;    /* really bad */
                }
#endif /* PF_ALTQ */
                break;
            case PF_RULESET_TABLE:
                bzero(table, sizeof (*table));
                strlcpy(table->pfrt_anchor, ioe->anchor,
                    sizeof (table->pfrt_anchor));
                if ((error = pfr_ina_commit(table, ioe->ticket,
                    NULL, NULL, 0))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;    /* really bad */
                }
                break;
            default:
                if ((error = pf_commit_rules(ioe->ticket,
                    ioe->rs_num, ioe->anchor))) {
                    _FREE(table, M_TEMP);
                    _FREE(ioe, M_TEMP);
                    goto fail;    /* really bad */
                }
                break;
            }
        }
        _FREE(table, M_TEMP);
        _FREE(ioe, M_TEMP);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

fail:
    return (error);
}
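
/*
 * DIOCGETSRCNODES uses the same size-probe/fetch protocol as
 * DIOCGETSTATES; tree linkage is scrubbed and timestamps are converted
 * to relative values before the nodes are copied out.
 */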
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
    int p64 = proc_is64bit(p);
    int error = 0;

    switch (cmd) {
    case DIOCGETSRCNODES: {
        struct pf_src_node *n, *pstore;
        user_addr_t buf;
        u_int32_t nr = 0;
        int space, size;

        space = (p64 ? psn64->psn_len : psn32->psn_len);
        if (space == 0) {
            RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
                nr++;

            size = sizeof (struct pf_src_node) * nr;
            if (p64)
                psn64->psn_len = size;
            else
                psn32->psn_len = size;
            break;
        }

        pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
        if (pstore == NULL) {
            error = ENOMEM;
            break;
        }
        buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

        RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
            uint64_t secs = pf_time_second(), diff;

            if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
                break;

            bcopy(n, pstore, sizeof (*pstore));
            if (n->rule.ptr != NULL)
                pstore->rule.nr = n->rule.ptr->nr;
            pstore->creation = secs - pstore->creation;
            if (pstore->expire > secs)
                pstore->expire -= secs;
            else
                pstore->expire = 0;

            /* adjust the connection rate estimate */
            diff = secs - n->conn_rate.last;
            if (diff >= n->conn_rate.seconds)
                pstore->conn_rate.count = 0;
            else
                pstore->conn_rate.count -=
                    n->conn_rate.count * diff /
                    n->conn_rate.seconds;

            _RB_PARENT(pstore, entry) = NULL;
            RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;

            error = copyout(pstore, buf, sizeof (*pstore));
            if (error) {
                _FREE(pstore, M_TEMP);
                goto fail;
            }
            buf += sizeof (*pstore);
            nr++;
        }

        size = sizeof (struct pf_src_node) * nr;
        if (p64)
            psn64->psn_len = size;
        else
            psn32->psn_len = size;

        _FREE(pstore, M_TEMP);
        break;
    }

    default:
        error = ENODEV;
        break;
    }
fail:
    return (error);
}
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
    int error = 0;

    switch (cmd) {
    case DIOCKILLSRCNODES: {
        struct pf_src_node *sn;
        struct pf_state *s;
        int killed = 0;

        RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
            if (PF_MATCHA(psnk->psnk_src.neg,
                &psnk->psnk_src.addr.v.a.addr,
                &psnk->psnk_src.addr.v.a.mask,
                &sn->addr, sn->af) &&
                PF_MATCHA(psnk->psnk_dst.neg,
                &psnk->psnk_dst.addr.v.a.addr,
                &psnk->psnk_dst.addr.v.a.mask,
                &sn->raddr, sn->af)) {
                /* Handle state to src_node linkage */
                if (sn->states != 0) {
                    RB_FOREACH(s, pf_state_tree_id,
                        &tree_id) {
                        if (s->src_node == sn)
                            s->src_node = NULL;
                        if (s->nat_src_node == sn)
                            s->nat_src_node = NULL;
                    }
                    sn->states = 0;
                }
                sn->expire = 1;
                killed++;
            }
        }

        if (killed > 0)
            pf_purge_expired_src_nodes();

        psnk->psnk_af = killed;
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
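
/*
 * Interface ioctls: enumerate the kernel interface table into user-space
 * pfi_uif records and set or clear user-controllable interface flags.
 */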
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
    int p64 = proc_is64bit(p);
    int error = 0;

    switch (cmd) {
    case DIOCIGETIFACES: {
        user_addr_t buf;
        int esize;

        buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
        esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);

        /* esize must be that of the user space version of pfi_kif */
        if (esize != sizeof (struct pfi_uif)) {
            error = ENODEV;
            break;
        }
        if (p64)
            io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
        else
            io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
        error = pfi_get_ifaces(
            p64 ? io64->pfiio_name : io32->pfiio_name, buf,
            p64 ? &io64->pfiio_size : &io32->pfiio_size);
        break;
    }

    case DIOCSETIFFLAG: {
        if (p64)
            io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
        else
            io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';

        error = pfi_set_flags(
            p64 ? io64->pfiio_name : io32->pfiio_name,
            p64 ? io64->pfiio_flags : io32->pfiio_flags);
        break;
    }

    case DIOCCLRIFFLAG: {
        if (p64)
            io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
        else
            io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';

        error = pfi_clear_flags(
            p64 ? io64->pfiio_name : io32->pfiio_name,
            p64 ? io64->pfiio_flags : io32->pfiio_flags);
        break;
    }

    default:
        error = ENODEV;
        break;
    }

    return (error);
}
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
    int error = 0;
    struct mbuf *nextpkt;
    net_thread_marks_t marks;
    struct ifnet * pf_ifp = ifp;

    /* Always allow traffic on co-processor interfaces. */
    if (ifp && IFNET_IS_INTCOPROC(ifp))
        return (0);

    marks = net_thread_marks_push(NET_THREAD_HELD_PF);

    if (marks != net_thread_marks_none) {
        lck_rw_lock_shared(pf_perim_lock);
        lck_mtx_lock(pf_lock);
    }

    if (mppn != NULL && *mppn != NULL)
        VERIFY(*mppn == *mp);
    if ((nextpkt = (*mp)->m_nextpkt) != NULL)
        (*mp)->m_nextpkt = NULL;

    /*
     * For packets destined to locally hosted IP address
     * ip_output_list sets Mbuf's pkt header's rcvif to
     * the interface hosting the IP address.
     * While on the output path the ifp passed to pf_af_hook
     * for such local communication is the loopback interface,
     * the input path derives ifp from the mbuf packet header's
     * rcvif.
     * This asymmetry causes issues with PF.
     * To handle that case, we have a limited change here to
     * pass the interface as loopback if packets are looped in.
     */
    if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
        pf_ifp = lo_ifp;
    }

    switch (af) {
#if INET
    case AF_INET: {
        error = pf_inet_hook(pf_ifp, mp, input, fwa);
        break;
    }
#endif /* INET */
#if INET6
    case AF_INET6:
        error = pf_inet6_hook(pf_ifp, mp, input, fwa);
        break;
#endif /* INET6 */
    default:
        break;
    }

    /* When packet valid, link to the next packet */
    if (*mp != NULL && nextpkt != NULL) {
        struct mbuf *m = *mp;
        while (m->m_nextpkt != NULL)
            m = m->m_nextpkt;
        m->m_nextpkt = nextpkt;
    }
    /* Fix up linkage of previous packet in the chain */
    if (mppn != NULL) {
        if (*mp != NULL)
            *mppn = *mp;
        else
            *mppn = nextpkt;
    }

    if (marks != net_thread_marks_none)
        lck_mtx_unlock(pf_lock);

    if (marks != net_thread_marks_none)
        lck_rw_done(pf_perim_lock);

    net_thread_marks_pop(marks);
    return (error);
}
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
    struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
    struct ip *ip = mtod(m, struct ip *);
#endif
    int error = 0;

    /*
     * If the packet is outbound, is originated locally, is flagged for
     * delayed UDP/TCP checksum calculation, and is about to be processed
     * for an interface that doesn't support the appropriate checksum
     * offloading, then calculate the checksum here so that PF can adjust
     * it properly.
     */
    if (!input && m->m_pkthdr.rcvif == NULL) {
        static const int mask = CSUM_DELAY_DATA;
        const int flags = m->m_pkthdr.csum_flags &
            ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

        if (flags & mask) {
            in_delayed_cksum(m);
            m->m_pkthdr.csum_flags &= ~mask;
        }
    }

#if BYTE_ORDER != BIG_ENDIAN
    HTONS(ip->ip_len);
    HTONS(ip->ip_off);
#endif
    if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
        if (*mp != NULL) {
            m_freem(*mp);
            *mp = NULL;
            error = EHOSTUNREACH;
        } else {
            error = ENOBUFS;
        }
    }
#if BYTE_ORDER != BIG_ENDIAN
    else {
        if (*mp != NULL) {
            ip = mtod(*mp, struct ip *);
            NTOHS(ip->ip_len);
            NTOHS(ip->ip_off);
        }
    }
#endif
    return (error);
}
static int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
    int error = 0;

    /*
     * If the packet is outbound, is originated locally, is flagged for
     * delayed UDP/TCP checksum calculation, and is about to be processed
     * for an interface that doesn't support the appropriate checksum
     * offloading, then calculate the checksum here so that PF can adjust
     * it properly.
     */
    if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
        static const int mask = CSUM_DELAY_IPV6_DATA;
        const int flags = (*mp)->m_pkthdr.csum_flags &
            ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

        if (flags & mask) {
            /*
             * Checksum offload should not have been enabled
             * when extension headers exist, thus 0 for optlen.
             */
            in6_delayed_cksum(*mp);
            (*mp)->m_pkthdr.csum_flags &= ~mask;
        }
    }

    if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
        if (*mp != NULL) {
            m_freem(*mp);
            *mp = NULL;
            error = EHOSTUNREACH;
        } else {
            error = ENOBUFS;
        }
    }
    return (error);
}
int
pf_ifaddr_hook(struct ifnet *ifp)
{
    struct pfi_kif *kif = ifp->if_pf_kif;

    if (kif != NULL) {
        lck_rw_lock_shared(pf_perim_lock);
        lck_mtx_lock(pf_lock);

        pfi_kifaddr_update(kif);

        lck_mtx_unlock(pf_lock);
        lck_rw_done(pf_perim_lock);
    }
    return (0);
}
/*
 * Caller acquires dlil lock as writer (exclusive)
 */
void
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
    lck_rw_lock_shared(pf_perim_lock);
    lck_mtx_lock(pf_lock);
    if (attach)
        pfi_attach_ifnet(ifp);
    else
        pfi_detach_ifnet(ifp);
    lck_mtx_unlock(pf_lock);
    lck_rw_done(pf_perim_lock);
}
void
pf_attach_hooks(void)
{
    ifnet_head_lock_shared();
    /*
     * Check against ifnet_addrs[] before proceeding, in case this
     * is called very early on, e.g. during dlil_init() before any
     * network interface is attached.
     */
    if (ifnet_addrs != NULL) {
        int i;

        for (i = 0; i <= if_index; i++) {
            struct ifnet *ifp = ifindex2ifnet[i];
            if (ifp != NULL)
                pfi_attach_ifnet(ifp);
        }
    }
    ifnet_head_done();
}
/* currently unused along with pfdetach() */
void
pf_detach_hooks(void)
{
    ifnet_head_lock_shared();
    if (ifnet_addrs != NULL) {
        int i;

        for (i = 0; i <= if_index; i++) {
            struct ifnet *ifp = ifindex2ifnet[i];
            if (ifp != NULL && ifp->if_pf_kif != NULL)
                pfi_detach_ifnet(ifp);
        }
    }
    ifnet_head_done();
}
/*
 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique.  This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failure at compile time indicates duplicated ioctl values.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
    /*
     * This is equivalent to _CASSERT() and the compiler wouldn't
     * generate any instructions, thus for compile time only.
     */
    switch ((u_long)0) {
    case 0:

    /* bsd/net/pfvar.h */
    case DIOCSTART:
    case DIOCSTOP:
    case DIOCADDRULE:
    case DIOCGETSTARTERS:
    case DIOCGETRULES:
    case DIOCGETRULE:
    case DIOCSTARTREF:
    case DIOCSTOPREF:
    case DIOCCLRSTATES:
    case DIOCGETSTATE:
    case DIOCSETSTATUSIF:
    case DIOCGETSTATUS:
    case DIOCCLRSTATUS:
    case DIOCNATLOOK:
    case DIOCSETDEBUG:
    case DIOCGETSTATES:
    case DIOCCHANGERULE:
    case DIOCINSERTRULE:
    case DIOCDELETERULE:
    case DIOCSETTIMEOUT:
    case DIOCGETTIMEOUT:
    case DIOCADDSTATE:
    case DIOCCLRRULECTRS:
    case DIOCGETLIMIT:
    case DIOCSETLIMIT:
    case DIOCKILLSTATES:
    case DIOCSTARTALTQ:
    case DIOCSTOPALTQ:
    case DIOCADDALTQ:
    case DIOCGETALTQS:
    case DIOCGETALTQ:
    case DIOCCHANGEALTQ:
    case DIOCGETQSTATS:
    case DIOCBEGINADDRS:
    case DIOCADDADDR:
    case DIOCGETADDRS:
    case DIOCGETADDR:
    case DIOCCHANGEADDR:
    case DIOCGETRULESETS:
    case DIOCGETRULESET:
    case DIOCRCLRTABLES:
    case DIOCRADDTABLES:
    case DIOCRDELTABLES:
    case DIOCRGETTABLES:
    case DIOCRGETTSTATS:
    case DIOCRCLRTSTATS:
    case DIOCRCLRADDRS:
    case DIOCRADDADDRS:
    case DIOCRDELADDRS:
    case DIOCRSETADDRS:
    case DIOCRGETADDRS:
    case DIOCRGETASTATS:
    case DIOCRCLRASTATS:
    case DIOCRTSTADDRS:
    case DIOCRSETTFLAGS:
    case DIOCRINADEFINE:
    case DIOCOSFPFLUSH:
    case DIOCOSFPADD:
    case DIOCOSFPGET:
    case DIOCXBEGIN:
    case DIOCXCOMMIT:
    case DIOCXROLLBACK:
    case DIOCGETSRCNODES:
    case DIOCCLRSRCNODES:
    case DIOCSETHOSTID:
    case DIOCIGETIFACES:
    case DIOCSETIFFLAG:
    case DIOCCLRIFFLAG:
    case DIOCKILLSRCNODES: