2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
84 #include <mach/vm_param.h>
88 #include <net/if_types.h>
89 #include <net/route.h>
91 #include <netinet/in.h>
92 #include <netinet/in_var.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/ip_icmp.h>
97 #include <netinet/if_ether.h>
100 #include <netinet/ip_dummynet.h>
103 #endif /* DUMMYNET */
105 #include <libkern/crypto/md5.h>
107 #include <machine/machine_routines.h>
109 #include <miscfs/devfs/devfs.h>
111 #include <net/pfvar.h>
114 #include <net/if_pfsync.h>
118 #include <net/if_pflog.h>
122 #include <netinet/ip6.h>
123 #include <netinet/in_pcb.h>
127 #include <net/altq/altq.h>
128 #include <net/altq/altq_cbq.h>
129 #include <net/classq/classq_red.h>
130 #include <net/classq/classq_rio.h>
131 #include <net/classq/classq_blue.h>
132 #include <net/classq/classq_sfb.h>
136 static void pfdetach(void);
138 static int pfopen(dev_t
, int, int, struct proc
*);
139 static int pfclose(dev_t
, int, int, struct proc
*);
140 static int pfioctl(dev_t
, u_long
, caddr_t
, int, struct proc
*);
141 static int pfioctl_ioc_table(u_long
, struct pfioc_table_32
*,
142 struct pfioc_table_64
*, struct proc
*);
143 static int pfioctl_ioc_tokens(u_long
, struct pfioc_tokens_32
*,
144 struct pfioc_tokens_64
*, struct proc
*);
145 static int pfioctl_ioc_rule(u_long
, int, struct pfioc_rule
*, struct proc
*);
146 static int pfioctl_ioc_state_kill(u_long
, struct pfioc_state_kill
*,
148 static int pfioctl_ioc_state(u_long
, struct pfioc_state
*, struct proc
*);
149 static int pfioctl_ioc_states(u_long
, struct pfioc_states_32
*,
150 struct pfioc_states_64
*, struct proc
*);
151 static int pfioctl_ioc_natlook(u_long
, struct pfioc_natlook
*, struct proc
*);
152 static int pfioctl_ioc_tm(u_long
, struct pfioc_tm
*, struct proc
*);
153 static int pfioctl_ioc_limit(u_long
, struct pfioc_limit
*, struct proc
*);
154 static int pfioctl_ioc_pooladdr(u_long
, struct pfioc_pooladdr
*, struct proc
*);
155 static int pfioctl_ioc_ruleset(u_long
, struct pfioc_ruleset
*, struct proc
*);
156 static int pfioctl_ioc_trans(u_long
, struct pfioc_trans_32
*,
157 struct pfioc_trans_64
*, struct proc
*);
158 static int pfioctl_ioc_src_nodes(u_long
, struct pfioc_src_nodes_32
*,
159 struct pfioc_src_nodes_64
*, struct proc
*);
160 static int pfioctl_ioc_src_node_kill(u_long
, struct pfioc_src_node_kill
*,
162 static int pfioctl_ioc_iface(u_long
, struct pfioc_iface_32
*,
163 struct pfioc_iface_64
*, struct proc
*);
164 static struct pf_pool
*pf_get_pool(char *, u_int32_t
, u_int8_t
, u_int32_t
,
165 u_int8_t
, u_int8_t
, u_int8_t
);
166 static void pf_mv_pool(struct pf_palist
*, struct pf_palist
*);
167 static void pf_empty_pool(struct pf_palist
*);
169 static int pf_begin_altq(u_int32_t
*);
170 static int pf_rollback_altq(u_int32_t
);
171 static int pf_commit_altq(u_int32_t
);
172 static int pf_enable_altq(struct pf_altq
*);
173 static int pf_disable_altq(struct pf_altq
*);
174 static void pf_altq_copyin(struct pf_altq
*, struct pf_altq
*);
175 static void pf_altq_copyout(struct pf_altq
*, struct pf_altq
*);
177 static int pf_begin_rules(u_int32_t
*, int, const char *);
178 static int pf_rollback_rules(u_int32_t
, int, char *);
179 static int pf_setup_pfsync_matching(struct pf_ruleset
*);
180 static void pf_hash_rule(MD5_CTX
*, struct pf_rule
*);
181 static void pf_hash_rule_addr(MD5_CTX
*, struct pf_rule_addr
*, u_int8_t
);
182 static int pf_commit_rules(u_int32_t
, int, char *);
183 static void pf_rule_copyin(struct pf_rule
*, struct pf_rule
*, struct proc
*,
185 static void pf_rule_copyout(struct pf_rule
*, struct pf_rule
*);
186 static void pf_state_export(struct pfsync_state
*, struct pf_state_key
*,
188 static void pf_state_import(struct pfsync_state
*, struct pf_state_key
*,
190 static void pf_pooladdr_copyin(struct pf_pooladdr
*, struct pf_pooladdr
*);
191 static void pf_pooladdr_copyout(struct pf_pooladdr
*, struct pf_pooladdr
*);
192 static void pf_expire_states_and_src_nodes(struct pf_rule
*);
193 static void pf_delete_rule_from_ruleset(struct pf_ruleset
*,
194 int, struct pf_rule
*);
195 static int pf_rule_setup(struct pfioc_rule
*, struct pf_rule
*,
196 struct pf_ruleset
*);
197 static void pf_delete_rule_by_owner(char *);
198 static int pf_delete_rule_by_ticket(struct pfioc_rule
*);
199 static void pf_ruleset_cleanup(struct pf_ruleset
*, int);
200 static void pf_deleterule_anchor_step_out(struct pf_ruleset
**,
201 int, struct pf_rule
**);
203 #define PF_CDEV_MAJOR (-1)
205 static struct cdevsw pf_cdevsw
= {
208 /* read */ eno_rdwrt
,
209 /* write */ eno_rdwrt
,
212 /* reset */ eno_reset
,
214 /* select */ eno_select
,
216 /* strategy */ eno_strat
,
222 static void pf_attach_hooks(void);
224 /* currently unused along with pfdetach() */
225 static void pf_detach_hooks(void);
229 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
230 * and used in pf_af_hook() for performance optimization, such that packets
231 * will enter pf_test() or pf_test6() only when PF is running.
233 int pf_is_enabled
= 0;
236 u_int32_t altq_allowed
= 0;
239 u_int32_t pf_hash_seed
;
242 * These are the pf enabled reference counting variables
244 static u_int64_t pf_enabled_ref_count
;
245 static u_int32_t nr_tokens
= 0;
246 static u_int64_t pffwrules
;
247 static u_int32_t pfdevcnt
;
249 SLIST_HEAD(list_head
, pfioc_kernel_token
);
250 static struct list_head token_list_head
;
252 struct pf_rule pf_default_rule
;
254 static int pf_altq_running
;
257 #define TAGID_MAX 50000
259 static TAILQ_HEAD(pf_tags
, pf_tagname
) pf_tags
=
260 TAILQ_HEAD_INITIALIZER(pf_tags
);
262 static TAILQ_HEAD(pf_tags
, pf_tagname
)
263 pf_tags
= TAILQ_HEAD_INITIALIZER(pf_tags
),
264 pf_qids
= TAILQ_HEAD_INITIALIZER(pf_qids
);
267 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
268 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
270 static u_int16_t
tagname2tag(struct pf_tags
*, char *);
271 static void tag2tagname(struct pf_tags
*, u_int16_t
, char *);
272 static void tag_unref(struct pf_tags
*, u_int16_t
);
273 static int pf_rtlabel_add(struct pf_addr_wrap
*);
274 static void pf_rtlabel_remove(struct pf_addr_wrap
*);
275 static void pf_rtlabel_copyout(struct pf_addr_wrap
*);
278 static int pf_inet_hook(struct ifnet
*, struct mbuf
**, int,
279 struct ip_fw_args
*);
282 static int pf_inet6_hook(struct ifnet
*, struct mbuf
**, int,
283 struct ip_fw_args
*);
286 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
289 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
291 #define PFIOCX_STRUCT_DECL(s) \
294 struct s##_32 _s##_32; \
295 struct s##_64 _s##_64; \
299 #define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
300 VERIFY(s##_un == NULL); \
301 s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
302 if (s##_un == NULL) { \
306 bcopy(a, &s##_un->_u._s##_64, \
307 sizeof (struct s##_64)); \
309 bcopy(a, &s##_un->_u._s##_32, \
310 sizeof (struct s##_32)); \
314 #define PFIOCX_STRUCT_END(s, a) { \
315 VERIFY(s##_un != NULL); \
317 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
319 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
320 _FREE(s##_un, M_TEMP); \
324 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
325 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
328 * Helper macros for regular ioctl structures.
330 #define PFIOC_STRUCT_BEGIN(a, v, _action) { \
331 VERIFY((v) == NULL); \
332 (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
336 bcopy(a, v, sizeof (*(v))); \
340 #define PFIOC_STRUCT_END(v, a) { \
341 VERIFY((v) != NULL); \
342 bcopy(v, a, sizeof (*(v))); \
347 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
348 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
350 static lck_attr_t
*pf_perim_lock_attr
;
351 static lck_grp_t
*pf_perim_lock_grp
;
352 static lck_grp_attr_t
*pf_perim_lock_grp_attr
;
354 static lck_attr_t
*pf_lock_attr
;
355 static lck_grp_t
*pf_lock_grp
;
356 static lck_grp_attr_t
*pf_lock_grp_attr
;
358 struct thread
*pf_purge_thread
;
360 extern void pfi_kifaddr_update(void *);
362 /* pf enable ref-counting helper functions */
363 static u_int64_t
generate_token(struct proc
*);
364 static int remove_token(struct pfioc_remove_token
*);
365 static void invalidate_all_tokens(void);
368 generate_token(struct proc
*p
)
370 u_int64_t token_value
;
371 struct pfioc_kernel_token
*new_token
;
373 new_token
= _MALLOC(sizeof (struct pfioc_kernel_token
), M_TEMP
,
376 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
378 if (new_token
== NULL
) {
379 /* malloc failed! bail! */
380 printf("%s: unable to allocate pf token structure!", __func__
);
384 token_value
= (u_int64_t
)(uintptr_t)new_token
;
386 new_token
->token
.token_value
= token_value
;
387 new_token
->token
.pid
= proc_pid(p
);
388 proc_name(new_token
->token
.pid
, new_token
->token
.proc_name
,
389 sizeof (new_token
->token
.proc_name
));
390 new_token
->token
.timestamp
= pf_calendar_time_second();
392 SLIST_INSERT_HEAD(&token_list_head
, new_token
, next
);
395 return (token_value
);
399 remove_token(struct pfioc_remove_token
*tok
)
401 struct pfioc_kernel_token
*entry
, *tmp
;
403 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
405 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
406 if (tok
->token_value
== entry
->token
.token_value
) {
407 SLIST_REMOVE(&token_list_head
, entry
,
408 pfioc_kernel_token
, next
);
409 _FREE(entry
, M_TEMP
);
411 return (0); /* success */
415 printf("pf : remove failure\n");
416 return (ESRCH
); /* failure */
420 invalidate_all_tokens(void)
422 struct pfioc_kernel_token
*entry
, *tmp
;
424 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
426 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
427 SLIST_REMOVE(&token_list_head
, entry
, pfioc_kernel_token
, next
);
428 _FREE(entry
, M_TEMP
);
437 u_int32_t
*t
= pf_default_rule
.timeout
;
440 pf_perim_lock_grp_attr
= lck_grp_attr_alloc_init();
441 pf_perim_lock_grp
= lck_grp_alloc_init("pf_perim",
442 pf_perim_lock_grp_attr
);
443 pf_perim_lock_attr
= lck_attr_alloc_init();
444 lck_rw_init(pf_perim_lock
, pf_perim_lock_grp
, pf_perim_lock_attr
);
446 pf_lock_grp_attr
= lck_grp_attr_alloc_init();
447 pf_lock_grp
= lck_grp_alloc_init("pf", pf_lock_grp_attr
);
448 pf_lock_attr
= lck_attr_alloc_init();
449 lck_mtx_init(pf_lock
, pf_lock_grp
, pf_lock_attr
);
451 pool_init(&pf_rule_pl
, sizeof (struct pf_rule
), 0, 0, 0, "pfrulepl",
453 pool_init(&pf_src_tree_pl
, sizeof (struct pf_src_node
), 0, 0, 0,
455 pool_init(&pf_state_pl
, sizeof (struct pf_state
), 0, 0, 0, "pfstatepl",
457 pool_init(&pf_state_key_pl
, sizeof (struct pf_state_key
), 0, 0, 0,
458 "pfstatekeypl", NULL
);
459 pool_init(&pf_app_state_pl
, sizeof (struct pf_app_state
), 0, 0, 0,
460 "pfappstatepl", NULL
);
462 pool_init(&pf_altq_pl
, sizeof (struct pf_altq
), 0, 0, 0, "pfaltqpl",
465 pool_init(&pf_pooladdr_pl
, sizeof (struct pf_pooladdr
), 0, 0, 0,
466 "pfpooladdrpl", NULL
);
469 pf_osfp_initialize();
471 pool_sethardlimit(pf_pool_limits
[PF_LIMIT_STATES
].pp
,
472 pf_pool_limits
[PF_LIMIT_STATES
].limit
, NULL
, 0);
474 if (max_mem
<= 256*1024*1024)
475 pf_pool_limits
[PF_LIMIT_TABLE_ENTRIES
].limit
=
476 PFR_KENTRY_HIWAT_SMALL
;
478 RB_INIT(&tree_src_tracking
);
479 RB_INIT(&pf_anchors
);
480 pf_init_ruleset(&pf_main_ruleset
);
481 TAILQ_INIT(&pf_pabuf
);
482 TAILQ_INIT(&state_list
);
484 TAILQ_INIT(&pf_altqs
[0]);
485 TAILQ_INIT(&pf_altqs
[1]);
486 pf_altqs_active
= &pf_altqs
[0];
487 pf_altqs_inactive
= &pf_altqs
[1];
489 PE_parse_boot_argn("altq", &altq_allowed
, sizeof (altq_allowed
));
491 _CASSERT(ALTRQ_PURGE
== CLASSQRQ_PURGE
);
492 _CASSERT(ALTRQ_PURGE_SC
== CLASSQRQ_PURGE_SC
);
493 _CASSERT(ALTRQ_EVENT
== CLASSQRQ_EVENT
);
495 _CASSERT(ALTDQ_REMOVE
== CLASSQDQ_REMOVE
);
496 _CASSERT(ALTDQ_POLL
== CLASSQDQ_POLL
);
499 _CASSERT((SC_BE
& SCIDX_MASK
) == SCIDX_BE
);
500 _CASSERT((SC_BK_SYS
& SCIDX_MASK
) == SCIDX_BK_SYS
);
501 _CASSERT((SC_BK
& SCIDX_MASK
) == SCIDX_BK
);
502 _CASSERT((SC_RD
& SCIDX_MASK
) == SCIDX_RD
);
503 _CASSERT((SC_OAM
& SCIDX_MASK
) == SCIDX_OAM
);
504 _CASSERT((SC_AV
& SCIDX_MASK
) == SCIDX_AV
);
505 _CASSERT((SC_RV
& SCIDX_MASK
) == SCIDX_RV
);
506 _CASSERT((SC_VI
& SCIDX_MASK
) == SCIDX_VI
);
507 _CASSERT((SC_VO
& SCIDX_MASK
) == SCIDX_VO
);
508 _CASSERT((SC_CTL
& SCIDX_MASK
) == SCIDX_CTL
);
510 /* default rule should never be garbage collected */
511 pf_default_rule
.entries
.tqe_prev
= &pf_default_rule
.entries
.tqe_next
;
512 pf_default_rule
.action
= PF_PASS
;
513 pf_default_rule
.nr
= -1;
514 pf_default_rule
.rtableid
= IFSCOPE_NONE
;
516 /* initialize default timeouts */
517 t
[PFTM_TCP_FIRST_PACKET
] = PFTM_TCP_FIRST_PACKET_VAL
;
518 t
[PFTM_TCP_OPENING
] = PFTM_TCP_OPENING_VAL
;
519 t
[PFTM_TCP_ESTABLISHED
] = PFTM_TCP_ESTABLISHED_VAL
;
520 t
[PFTM_TCP_CLOSING
] = PFTM_TCP_CLOSING_VAL
;
521 t
[PFTM_TCP_FIN_WAIT
] = PFTM_TCP_FIN_WAIT_VAL
;
522 t
[PFTM_TCP_CLOSED
] = PFTM_TCP_CLOSED_VAL
;
523 t
[PFTM_UDP_FIRST_PACKET
] = PFTM_UDP_FIRST_PACKET_VAL
;
524 t
[PFTM_UDP_SINGLE
] = PFTM_UDP_SINGLE_VAL
;
525 t
[PFTM_UDP_MULTIPLE
] = PFTM_UDP_MULTIPLE_VAL
;
526 t
[PFTM_ICMP_FIRST_PACKET
] = PFTM_ICMP_FIRST_PACKET_VAL
;
527 t
[PFTM_ICMP_ERROR_REPLY
] = PFTM_ICMP_ERROR_REPLY_VAL
;
528 t
[PFTM_GREv1_FIRST_PACKET
] = PFTM_GREv1_FIRST_PACKET_VAL
;
529 t
[PFTM_GREv1_INITIATING
] = PFTM_GREv1_INITIATING_VAL
;
530 t
[PFTM_GREv1_ESTABLISHED
] = PFTM_GREv1_ESTABLISHED_VAL
;
531 t
[PFTM_ESP_FIRST_PACKET
] = PFTM_ESP_FIRST_PACKET_VAL
;
532 t
[PFTM_ESP_INITIATING
] = PFTM_ESP_INITIATING_VAL
;
533 t
[PFTM_ESP_ESTABLISHED
] = PFTM_ESP_ESTABLISHED_VAL
;
534 t
[PFTM_OTHER_FIRST_PACKET
] = PFTM_OTHER_FIRST_PACKET_VAL
;
535 t
[PFTM_OTHER_SINGLE
] = PFTM_OTHER_SINGLE_VAL
;
536 t
[PFTM_OTHER_MULTIPLE
] = PFTM_OTHER_MULTIPLE_VAL
;
537 t
[PFTM_FRAG
] = PFTM_FRAG_VAL
;
538 t
[PFTM_INTERVAL
] = PFTM_INTERVAL_VAL
;
539 t
[PFTM_SRC_NODE
] = PFTM_SRC_NODE_VAL
;
540 t
[PFTM_TS_DIFF
] = PFTM_TS_DIFF_VAL
;
541 t
[PFTM_ADAPTIVE_START
] = PFSTATE_ADAPT_START
;
542 t
[PFTM_ADAPTIVE_END
] = PFSTATE_ADAPT_END
;
545 bzero(&pf_status
, sizeof (pf_status
));
546 pf_status
.debug
= PF_DEBUG_URGENT
;
547 pf_hash_seed
= random();
549 /* XXX do our best to avoid a conflict */
550 pf_status
.hostid
= random();
552 if (kernel_thread_start(pf_purge_thread_fn
, NULL
,
553 &pf_purge_thread
) != 0) {
554 printf("%s: unable to start purge thread!", __func__
);
558 maj
= cdevsw_add(PF_CDEV_MAJOR
, &pf_cdevsw
);
560 printf("%s: failed to allocate major number!\n", __func__
);
563 (void) devfs_make_node(makedev(maj
, PFDEV_PF
), DEVFS_CHAR
,
564 UID_ROOT
, GID_WHEEL
, 0600, "pf", 0);
566 (void) devfs_make_node(makedev(maj
, PFDEV_PFM
), DEVFS_CHAR
,
567 UID_ROOT
, GID_WHEEL
, 0600, "pfm", 0);
576 struct pf_anchor
*anchor
;
577 struct pf_state
*state
;
578 struct pf_src_node
*node
;
579 struct pfioc_table pt
;
586 pf_status
.running
= 0;
587 wakeup(pf_purge_thread_fn
);
589 /* clear the rulesets */
590 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
591 if (pf_begin_rules(&ticket
, i
, &r
) == 0)
592 pf_commit_rules(ticket
, i
, &r
);
594 if (pf_begin_altq(&ticket
) == 0)
595 pf_commit_altq(ticket
);
599 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
600 state
->timeout
= PFTM_PURGE
;
602 state
->sync_flags
= PFSTATE_NOSYNC
;
605 pf_purge_expired_states(pf_status
.states
);
608 pfsync_clear_states(pf_status
.hostid
, NULL
);
611 /* clear source nodes */
612 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
613 state
->src_node
= NULL
;
614 state
->nat_src_node
= NULL
;
616 RB_FOREACH(node
, pf_src_tree
, &tree_src_tracking
) {
620 pf_purge_expired_src_nodes();
623 memset(&pt
, '\0', sizeof (pt
));
624 pfr_clr_tables(&pt
.pfrio_table
, &pt
.pfrio_ndel
, pt
.pfrio_flags
);
626 /* destroy anchors */
627 while ((anchor
= RB_MIN(pf_anchor_global
, &pf_anchors
)) != NULL
) {
628 for (i
= 0; i
< PF_RULESET_MAX
; i
++)
629 if (pf_begin_rules(&ticket
, i
, anchor
->name
) == 0)
630 pf_commit_rules(ticket
, i
, anchor
->name
);
633 /* destroy main ruleset */
634 pf_remove_if_empty_ruleset(&pf_main_ruleset
);
636 /* destroy the pools */
637 pool_destroy(&pf_pooladdr_pl
);
639 pool_destroy(&pf_altq_pl
);
641 pool_destroy(&pf_state_pl
);
642 pool_destroy(&pf_rule_pl
);
643 pool_destroy(&pf_src_tree_pl
);
645 /* destroy subsystems */
646 pf_normalize_destroy();
654 pfopen(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
656 #pragma unused(flags, fmt, p)
657 if (minor(dev
) >= PFDEV_MAX
)
660 if (minor(dev
) == PFDEV_PFM
) {
661 lck_mtx_lock(pf_lock
);
663 lck_mtx_unlock(pf_lock
);
667 lck_mtx_unlock(pf_lock
);
673 pfclose(dev_t dev
, int flags
, int fmt
, struct proc
*p
)
675 #pragma unused(flags, fmt, p)
676 if (minor(dev
) >= PFDEV_MAX
)
679 if (minor(dev
) == PFDEV_PFM
) {
680 lck_mtx_lock(pf_lock
);
681 VERIFY(pfdevcnt
> 0);
683 lck_mtx_unlock(pf_lock
);
688 static struct pf_pool
*
689 pf_get_pool(char *anchor
, u_int32_t ticket
, u_int8_t rule_action
,
690 u_int32_t rule_number
, u_int8_t r_last
, u_int8_t active
,
691 u_int8_t check_ticket
)
693 struct pf_ruleset
*ruleset
;
694 struct pf_rule
*rule
;
697 ruleset
= pf_find_ruleset(anchor
);
700 rs_num
= pf_get_ruleset_number(rule_action
);
701 if (rs_num
>= PF_RULESET_MAX
)
704 if (check_ticket
&& ticket
!=
705 ruleset
->rules
[rs_num
].active
.ticket
)
708 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
711 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
713 if (check_ticket
&& ticket
!=
714 ruleset
->rules
[rs_num
].inactive
.ticket
)
717 rule
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
720 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].inactive
.ptr
);
723 while ((rule
!= NULL
) && (rule
->nr
!= rule_number
))
724 rule
= TAILQ_NEXT(rule
, entries
);
729 return (&rule
->rpool
);
/*
 * Move every pool-address entry from poola to poolb, preserving order.
 * poola is left empty; ownership of each entry transfers to poolb
 * (no entries are freed or copied).
 */
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	/* Pop from the head of poola, append to the tail of poolb. */
	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
/*
 * Destroy every pool-address entry on poola: release its dynamic-address
 * state, table reference and interface (kif) reference, unlink it, and
 * return the entry to the pf_pooladdr zone.  poola is left empty.
 */
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		/* Drop all external references before freeing the entry. */
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
758 pf_rm_rule(struct pf_rulequeue
*rulequeue
, struct pf_rule
*rule
)
760 if (rulequeue
!= NULL
) {
761 if (rule
->states
<= 0) {
763 * XXX - we need to remove the table *before* detaching
764 * the rule to make sure the table code does not delete
765 * the anchor under our feet.
767 pf_tbladdr_remove(&rule
->src
.addr
);
768 pf_tbladdr_remove(&rule
->dst
.addr
);
769 if (rule
->overload_tbl
)
770 pfr_detach_table(rule
->overload_tbl
);
772 TAILQ_REMOVE(rulequeue
, rule
, entries
);
773 rule
->entries
.tqe_prev
= NULL
;
777 if (rule
->states
> 0 || rule
->src_nodes
> 0 ||
778 rule
->entries
.tqe_prev
!= NULL
)
780 pf_tag_unref(rule
->tag
);
781 pf_tag_unref(rule
->match_tag
);
784 if (rule
->pqid
!= rule
->qid
)
785 pf_qid_unref(rule
->pqid
);
786 pf_qid_unref(rule
->qid
);
789 pf_rtlabel_remove(&rule
->src
.addr
);
790 pf_rtlabel_remove(&rule
->dst
.addr
);
791 pfi_dynaddr_remove(&rule
->src
.addr
);
792 pfi_dynaddr_remove(&rule
->dst
.addr
);
793 if (rulequeue
== NULL
) {
794 pf_tbladdr_remove(&rule
->src
.addr
);
795 pf_tbladdr_remove(&rule
->dst
.addr
);
796 if (rule
->overload_tbl
)
797 pfr_detach_table(rule
->overload_tbl
);
799 pfi_kif_unref(rule
->kif
, PFI_KIF_REF_RULE
);
800 pf_anchor_remove(rule
);
801 pf_empty_pool(&rule
->rpool
.list
);
802 pool_put(&pf_rule_pl
, rule
);
806 tagname2tag(struct pf_tags
*head
, char *tagname
)
808 struct pf_tagname
*tag
, *p
= NULL
;
809 u_int16_t new_tagid
= 1;
811 TAILQ_FOREACH(tag
, head
, entries
)
812 if (strcmp(tagname
, tag
->name
) == 0) {
818 * to avoid fragmentation, we do a linear search from the beginning
819 * and take the first free slot we find. if there is none or the list
820 * is empty, append a new entry at the end.
824 if (!TAILQ_EMPTY(head
))
825 for (p
= TAILQ_FIRST(head
); p
!= NULL
&&
826 p
->tag
== new_tagid
; p
= TAILQ_NEXT(p
, entries
))
827 new_tagid
= p
->tag
+ 1;
829 if (new_tagid
> TAGID_MAX
)
832 /* allocate and fill new struct pf_tagname */
833 tag
= _MALLOC(sizeof (*tag
), M_TEMP
, M_WAITOK
|M_ZERO
);
836 strlcpy(tag
->name
, tagname
, sizeof (tag
->name
));
837 tag
->tag
= new_tagid
;
840 if (p
!= NULL
) /* insert new entry before p */
841 TAILQ_INSERT_BEFORE(p
, tag
, entries
);
842 else /* either list empty or no free slot in between */
843 TAILQ_INSERT_TAIL(head
, tag
, entries
);
849 tag2tagname(struct pf_tags
*head
, u_int16_t tagid
, char *p
)
851 struct pf_tagname
*tag
;
853 TAILQ_FOREACH(tag
, head
, entries
)
854 if (tag
->tag
== tagid
) {
855 strlcpy(p
, tag
->name
, PF_TAG_NAME_SIZE
);
861 tag_unref(struct pf_tags
*head
, u_int16_t tag
)
863 struct pf_tagname
*p
, *next
;
868 for (p
= TAILQ_FIRST(head
); p
!= NULL
; p
= next
) {
869 next
= TAILQ_NEXT(p
, entries
);
872 TAILQ_REMOVE(head
, p
, entries
);
/*
 * Map a tag name to its numeric id in the global pf_tags table,
 * taking a reference (a new id is allocated on first use; see
 * tagname2tag()).
 *
 * NOTE(review): the return-type line is not visible in this chunk;
 * tagname2tag() returns u_int16_t, so that is assumed here — confirm
 * against the public prototype.
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
/*
 * Copy the name registered for tag id `tagid' in the global pf_tags
 * table into `p' (see tag2tagname()); `p' is left unmodified when the
 * id is not found.
 */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
893 pf_tag_ref(u_int16_t tag
)
895 struct pf_tagname
*t
;
897 TAILQ_FOREACH(t
, &pf_tags
, entries
)
/*
 * Drop one reference on tag id `tag' in the global pf_tags table
 * (see tag_unref()).
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
911 pf_rtlabel_add(struct pf_addr_wrap
*a
)
918 pf_rtlabel_remove(struct pf_addr_wrap
*a
)
924 pf_rtlabel_copyout(struct pf_addr_wrap
*a
)
/*
 * Map an ALTQ queue name to a numeric qid via the pf_qids tag table,
 * taking a reference (allocating a new id on first use).  Caller must
 * hold pf_lock.
 *
 * NOTE(review): the return-type line is not visible in this chunk; the
 * explicit (u_int32_t) cast on the return suggests u_int32_t — confirm
 * against the public prototype.
 */
u_int32_t
pf_qname2qid(char *qname)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
/*
 * Copy the queue name registered for `qid' in the pf_qids tag table
 * into `p'.  Caller must hold pf_lock.  qids are stored as 16-bit
 * tags, hence the narrowing cast.
 */
void
pf_qid2qname(u_int32_t qid, char *p)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}
/*
 * Drop one reference on queue id `qid' in the pf_qids tag table.
 * Caller must hold pf_lock.  qids are stored as 16-bit tags, hence
 * the narrowing cast.
 */
void
pf_qid_unref(u_int32_t qid)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag_unref(&pf_qids, (u_int16_t)qid);
}
955 pf_begin_altq(u_int32_t
*ticket
)
957 struct pf_altq
*altq
;
960 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
962 /* Purge the old altq list */
963 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
964 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
965 if (altq
->qname
[0] == '\0') {
966 /* detach and destroy the discipline */
967 error
= altq_remove(altq
);
969 pf_qid_unref(altq
->qid
);
970 pool_put(&pf_altq_pl
, altq
);
974 *ticket
= ++ticket_altqs_inactive
;
975 altqs_inactive_open
= 1;
980 pf_rollback_altq(u_int32_t ticket
)
982 struct pf_altq
*altq
;
985 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
987 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
989 /* Purge the old altq list */
990 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
991 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
992 if (altq
->qname
[0] == '\0') {
993 /* detach and destroy the discipline */
994 error
= altq_remove(altq
);
996 pf_qid_unref(altq
->qid
);
997 pool_put(&pf_altq_pl
, altq
);
999 altqs_inactive_open
= 0;
1004 pf_commit_altq(u_int32_t ticket
)
1006 struct pf_altqqueue
*old_altqs
;
1007 struct pf_altq
*altq
;
1010 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1012 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
1015 /* swap altqs, keep the old. */
1016 old_altqs
= pf_altqs_active
;
1017 pf_altqs_active
= pf_altqs_inactive
;
1018 pf_altqs_inactive
= old_altqs
;
1019 ticket_altqs_active
= ticket_altqs_inactive
;
1021 /* Attach new disciplines */
1022 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1023 if (altq
->qname
[0] == '\0') {
1024 /* attach the discipline */
1025 error
= altq_pfattach(altq
);
1026 if (error
== 0 && pf_altq_running
)
1027 error
= pf_enable_altq(altq
);
1034 /* Purge the old altq list */
1035 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
1036 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
1037 if (altq
->qname
[0] == '\0') {
1038 /* detach and destroy the discipline */
1039 if (pf_altq_running
)
1040 error
= pf_disable_altq(altq
);
1041 err
= altq_pfdetach(altq
);
1042 if (err
!= 0 && error
== 0)
1044 err
= altq_remove(altq
);
1045 if (err
!= 0 && error
== 0)
1048 pf_qid_unref(altq
->qid
);
1049 pool_put(&pf_altq_pl
, altq
);
1052 altqs_inactive_open
= 0;
1057 pf_enable_altq(struct pf_altq
*altq
)
1060 struct ifclassq
*ifq
;
1063 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1065 if ((ifp
= ifunit(altq
->ifname
)) == NULL
)
1070 if (IFCQ_ALTQ(ifq
)->altq_type
!= ALTQT_NONE
)
1071 error
= altq_enable(IFCQ_ALTQ(ifq
));
1073 /* set or clear tokenbucket regulator */
1074 if (error
== 0 && ifp
!= NULL
&& ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq
))) {
1075 struct tb_profile tb
= { 0, 0, 0 };
1077 if (altq
->aflags
& PF_ALTQF_TBR
) {
1078 if (altq
->bwtype
!= PF_ALTQ_BW_ABSOLUTE
&&
1079 altq
->bwtype
!= PF_ALTQ_BW_PERCENT
) {
1082 if (altq
->bwtype
== PF_ALTQ_BW_ABSOLUTE
)
1083 tb
.rate
= altq
->ifbandwidth
;
1085 tb
.percent
= altq
->ifbandwidth
;
1086 tb
.depth
= altq
->tbrsize
;
1087 error
= ifclassq_tbr_set(ifq
, &tb
, TRUE
);
1089 } else if (IFCQ_TBR_IS_ENABLED(ifq
)) {
1090 error
= ifclassq_tbr_set(ifq
, &tb
, TRUE
);
1099 pf_disable_altq(struct pf_altq
*altq
)
1102 struct ifclassq
*ifq
;
1105 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1107 if ((ifp
= ifunit(altq
->ifname
)) == NULL
)
1111 * when the discipline is no longer referenced, it was overridden
1112 * by a new one. if so, just return.
1116 if (altq
->altq_disc
!= IFCQ_ALTQ(ifq
)->altq_disc
) {
1121 error
= altq_disable(IFCQ_ALTQ(ifq
));
1123 if (error
== 0 && IFCQ_TBR_IS_ENABLED(ifq
)) {
1124 /* clear tokenbucket regulator */
1125 struct tb_profile tb
= { 0, 0, 0 };
1126 error
= ifclassq_tbr_set(ifq
, &tb
, TRUE
);
/*
 * Copy a pf_altq structure (src, as supplied via ioctl) into dst,
 * sanitizing it: NUL-terminate the name fields and clear the
 * kernel-only discipline pointer and list linkage so stale caller
 * values cannot leak into kernel state.
 */
static void
pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
{
	bcopy(src, dst, sizeof (struct pf_altq));

	/* Ensure the copied strings are properly terminated. */
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->parent[sizeof (dst->parent) - 1] = '\0';

	/* Kernel-private fields must never be taken from the caller. */
	dst->altq_disc = NULL;
	dst->entries.tqe_next = NULL;
	dst->entries.tqe_prev = NULL;
}
/*
 * Copy a kernel pf_altq (src) out to dst through a stack-local scratch
 * copy, clearing the kernel-only discipline pointer and list linkage
 * so kernel addresses are not exposed to the caller.
 */
static void
pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
{
	/*
	 * NOTE(review): this local declaration is not visible in the
	 * chunk reviewed; its type is forced by every use below —
	 * confirm against the original file.
	 */
	struct pf_altq pa;

	bcopy(src, &pa, sizeof (struct pf_altq));
	pa.altq_disc = NULL;
	pa.entries.tqe_next = NULL;
	pa.entries.tqe_prev = NULL;
	bcopy(&pa, dst, sizeof (struct pf_altq));
}
1157 #endif /* PF_ALTQ */
1160 pf_begin_rules(u_int32_t
*ticket
, int rs_num
, const char *anchor
)
1162 struct pf_ruleset
*rs
;
1163 struct pf_rule
*rule
;
1165 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
1167 rs
= pf_find_or_create_ruleset(anchor
);
1170 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
1171 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
1172 rs
->rules
[rs_num
].inactive
.rcount
--;
1174 *ticket
= ++rs
->rules
[rs_num
].inactive
.ticket
;
1175 rs
->rules
[rs_num
].inactive
.open
= 1;
1180 pf_rollback_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
1182 struct pf_ruleset
*rs
;
1183 struct pf_rule
*rule
;
1185 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
1187 rs
= pf_find_ruleset(anchor
);
1188 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
1189 rs
->rules
[rs_num
].inactive
.ticket
!= ticket
)
1191 while ((rule
= TAILQ_FIRST(rs
->rules
[rs_num
].inactive
.ptr
)) != NULL
) {
1192 pf_rm_rule(rs
->rules
[rs_num
].inactive
.ptr
, rule
);
1193 rs
->rules
[rs_num
].inactive
.rcount
--;
1195 rs
->rules
[rs_num
].inactive
.open
= 0;
1199 #define PF_MD5_UPD(st, elm) \
1200 MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))
1202 #define PF_MD5_UPD_STR(st, elm) \
1203 MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))
1205 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
1206 (stor) = htonl((st)->elm); \
1207 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
1210 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
1211 (stor) = htons((st)->elm); \
1212 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
1216 pf_hash_rule_addr(MD5_CTX
*ctx
, struct pf_rule_addr
*pfr
, u_int8_t proto
)
1218 PF_MD5_UPD(pfr
, addr
.type
);
1219 switch (pfr
->addr
.type
) {
1220 case PF_ADDR_DYNIFTL
:
1221 PF_MD5_UPD(pfr
, addr
.v
.ifname
);
1222 PF_MD5_UPD(pfr
, addr
.iflags
);
1225 PF_MD5_UPD(pfr
, addr
.v
.tblname
);
1227 case PF_ADDR_ADDRMASK
:
1228 /* XXX ignore af? */
1229 PF_MD5_UPD(pfr
, addr
.v
.a
.addr
.addr32
);
1230 PF_MD5_UPD(pfr
, addr
.v
.a
.mask
.addr32
);
1232 case PF_ADDR_RTLABEL
:
1233 PF_MD5_UPD(pfr
, addr
.v
.rtlabelname
);
1240 PF_MD5_UPD(pfr
, xport
.range
.port
[0]);
1241 PF_MD5_UPD(pfr
, xport
.range
.port
[1]);
1242 PF_MD5_UPD(pfr
, xport
.range
.op
);
1249 PF_MD5_UPD(pfr
, neg
);
1253 pf_hash_rule(MD5_CTX
*ctx
, struct pf_rule
*rule
)
1258 pf_hash_rule_addr(ctx
, &rule
->src
, rule
->proto
);
1259 pf_hash_rule_addr(ctx
, &rule
->dst
, rule
->proto
);
1260 PF_MD5_UPD_STR(rule
, label
);
1261 PF_MD5_UPD_STR(rule
, ifname
);
1262 PF_MD5_UPD_STR(rule
, match_tagname
);
1263 PF_MD5_UPD_HTONS(rule
, match_tag
, x
); /* dup? */
1264 PF_MD5_UPD_HTONL(rule
, os_fingerprint
, y
);
1265 PF_MD5_UPD_HTONL(rule
, prob
, y
);
1266 PF_MD5_UPD_HTONL(rule
, uid
.uid
[0], y
);
1267 PF_MD5_UPD_HTONL(rule
, uid
.uid
[1], y
);
1268 PF_MD5_UPD(rule
, uid
.op
);
1269 PF_MD5_UPD_HTONL(rule
, gid
.gid
[0], y
);
1270 PF_MD5_UPD_HTONL(rule
, gid
.gid
[1], y
);
1271 PF_MD5_UPD(rule
, gid
.op
);
1272 PF_MD5_UPD_HTONL(rule
, rule_flag
, y
);
1273 PF_MD5_UPD(rule
, action
);
1274 PF_MD5_UPD(rule
, direction
);
1275 PF_MD5_UPD(rule
, af
);
1276 PF_MD5_UPD(rule
, quick
);
1277 PF_MD5_UPD(rule
, ifnot
);
1278 PF_MD5_UPD(rule
, match_tag_not
);
1279 PF_MD5_UPD(rule
, natpass
);
1280 PF_MD5_UPD(rule
, keep_state
);
1281 PF_MD5_UPD(rule
, proto
);
1282 PF_MD5_UPD(rule
, type
);
1283 PF_MD5_UPD(rule
, code
);
1284 PF_MD5_UPD(rule
, flags
);
1285 PF_MD5_UPD(rule
, flagset
);
1286 PF_MD5_UPD(rule
, allow_opts
);
1287 PF_MD5_UPD(rule
, rt
);
1288 PF_MD5_UPD(rule
, tos
);
1292 pf_commit_rules(u_int32_t ticket
, int rs_num
, char *anchor
)
1294 struct pf_ruleset
*rs
;
1295 struct pf_rule
*rule
, **old_array
;
1296 struct pf_rulequeue
*old_rules
;
1298 u_int32_t old_rcount
;
1300 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1302 if (rs_num
< 0 || rs_num
>= PF_RULESET_MAX
)
1304 rs
= pf_find_ruleset(anchor
);
1305 if (rs
== NULL
|| !rs
->rules
[rs_num
].inactive
.open
||
1306 ticket
!= rs
->rules
[rs_num
].inactive
.ticket
)
1309 /* Calculate checksum for the main ruleset */
1310 if (rs
== &pf_main_ruleset
) {
1311 error
= pf_setup_pfsync_matching(rs
);
1316 /* Swap rules, keep the old. */
1317 old_rules
= rs
->rules
[rs_num
].active
.ptr
;
1318 old_rcount
= rs
->rules
[rs_num
].active
.rcount
;
1319 old_array
= rs
->rules
[rs_num
].active
.ptr_array
;
1321 rs
->rules
[rs_num
].active
.ptr
=
1322 rs
->rules
[rs_num
].inactive
.ptr
;
1323 rs
->rules
[rs_num
].active
.ptr_array
=
1324 rs
->rules
[rs_num
].inactive
.ptr_array
;
1325 rs
->rules
[rs_num
].active
.rcount
=
1326 rs
->rules
[rs_num
].inactive
.rcount
;
1327 rs
->rules
[rs_num
].inactive
.ptr
= old_rules
;
1328 rs
->rules
[rs_num
].inactive
.ptr_array
= old_array
;
1329 rs
->rules
[rs_num
].inactive
.rcount
= old_rcount
;
1331 rs
->rules
[rs_num
].active
.ticket
=
1332 rs
->rules
[rs_num
].inactive
.ticket
;
1333 pf_calc_skip_steps(rs
->rules
[rs_num
].active
.ptr
);
1336 /* Purge the old rule list. */
1337 while ((rule
= TAILQ_FIRST(old_rules
)) != NULL
)
1338 pf_rm_rule(old_rules
, rule
);
1339 if (rs
->rules
[rs_num
].inactive
.ptr_array
)
1340 _FREE(rs
->rules
[rs_num
].inactive
.ptr_array
, M_TEMP
);
1341 rs
->rules
[rs_num
].inactive
.ptr_array
= NULL
;
1342 rs
->rules
[rs_num
].inactive
.rcount
= 0;
1343 rs
->rules
[rs_num
].inactive
.open
= 0;
1344 pf_remove_if_empty_ruleset(rs
);
1349 pf_rule_copyin(struct pf_rule
*src
, struct pf_rule
*dst
, struct proc
*p
,
1352 bcopy(src
, dst
, sizeof (struct pf_rule
));
1354 dst
->label
[sizeof (dst
->label
) - 1] = '\0';
1355 dst
->ifname
[sizeof (dst
->ifname
) - 1] = '\0';
1356 dst
->qname
[sizeof (dst
->qname
) - 1] = '\0';
1357 dst
->pqname
[sizeof (dst
->pqname
) - 1] = '\0';
1358 dst
->tagname
[sizeof (dst
->tagname
) - 1] = '\0';
1359 dst
->match_tagname
[sizeof (dst
->match_tagname
) - 1] = '\0';
1360 dst
->overload_tblname
[sizeof (dst
->overload_tblname
) - 1] = '\0';
1362 dst
->cuid
= kauth_cred_getuid(p
->p_ucred
);
1363 dst
->cpid
= p
->p_pid
;
1367 dst
->overload_tbl
= NULL
;
1369 TAILQ_INIT(&dst
->rpool
.list
);
1370 dst
->rpool
.cur
= NULL
;
1372 /* initialize refcounting */
1376 dst
->entries
.tqe_prev
= NULL
;
1377 dst
->entries
.tqe_next
= NULL
;
1378 if ((uint8_t)minordev
== PFDEV_PFM
)
1379 dst
->rule_flag
|= PFRULE_PFM
;
1383 pf_rule_copyout(struct pf_rule
*src
, struct pf_rule
*dst
)
1385 bcopy(src
, dst
, sizeof (struct pf_rule
));
1389 dst
->overload_tbl
= NULL
;
1391 TAILQ_INIT(&dst
->rpool
.list
);
1392 dst
->rpool
.cur
= NULL
;
1394 dst
->entries
.tqe_prev
= NULL
;
1395 dst
->entries
.tqe_next
= NULL
;
1399 pf_state_export(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1402 uint64_t secs
= pf_time_second();
1403 bzero(sp
, sizeof (struct pfsync_state
));
1405 /* copy from state key */
1406 sp
->lan
.addr
= sk
->lan
.addr
;
1407 sp
->lan
.xport
= sk
->lan
.xport
;
1408 sp
->gwy
.addr
= sk
->gwy
.addr
;
1409 sp
->gwy
.xport
= sk
->gwy
.xport
;
1410 sp
->ext
.addr
= sk
->ext
.addr
;
1411 sp
->ext
.xport
= sk
->ext
.xport
;
1412 sp
->proto_variant
= sk
->proto_variant
;
1414 sp
->proto
= sk
->proto
;
1416 sp
->direction
= sk
->direction
;
1417 sp
->flowhash
= sk
->flowhash
;
1419 /* copy from state */
1420 memcpy(&sp
->id
, &s
->id
, sizeof (sp
->id
));
1421 sp
->creatorid
= s
->creatorid
;
1422 strlcpy(sp
->ifname
, s
->kif
->pfik_name
, sizeof (sp
->ifname
));
1423 pf_state_peer_to_pfsync(&s
->src
, &sp
->src
);
1424 pf_state_peer_to_pfsync(&s
->dst
, &sp
->dst
);
1426 sp
->rule
= s
->rule
.ptr
->nr
;
1427 sp
->nat_rule
= (s
->nat_rule
.ptr
== NULL
) ?
1428 (unsigned)-1 : s
->nat_rule
.ptr
->nr
;
1429 sp
->anchor
= (s
->anchor
.ptr
== NULL
) ?
1430 (unsigned)-1 : s
->anchor
.ptr
->nr
;
1432 pf_state_counter_to_pfsync(s
->bytes
[0], sp
->bytes
[0]);
1433 pf_state_counter_to_pfsync(s
->bytes
[1], sp
->bytes
[1]);
1434 pf_state_counter_to_pfsync(s
->packets
[0], sp
->packets
[0]);
1435 pf_state_counter_to_pfsync(s
->packets
[1], sp
->packets
[1]);
1436 sp
->creation
= secs
- s
->creation
;
1437 sp
->expire
= pf_state_expires(s
);
1439 sp
->allow_opts
= s
->allow_opts
;
1440 sp
->timeout
= s
->timeout
;
1443 sp
->sync_flags
|= PFSYNC_FLAG_SRCNODE
;
1444 if (s
->nat_src_node
)
1445 sp
->sync_flags
|= PFSYNC_FLAG_NATSRCNODE
;
1447 if (sp
->expire
> secs
)
1455 pf_state_import(struct pfsync_state
*sp
, struct pf_state_key
*sk
,
1458 /* copy to state key */
1459 sk
->lan
.addr
= sp
->lan
.addr
;
1460 sk
->lan
.xport
= sp
->lan
.xport
;
1461 sk
->gwy
.addr
= sp
->gwy
.addr
;
1462 sk
->gwy
.xport
= sp
->gwy
.xport
;
1463 sk
->ext
.addr
= sp
->ext
.addr
;
1464 sk
->ext
.xport
= sp
->ext
.xport
;
1465 sk
->proto_variant
= sp
->proto_variant
;
1467 sk
->proto
= sp
->proto
;
1469 sk
->direction
= sp
->direction
;
1470 sk
->flowhash
= pf_calc_state_key_flowhash(sk
);
1473 memcpy(&s
->id
, &sp
->id
, sizeof (sp
->id
));
1474 s
->creatorid
= sp
->creatorid
;
1475 pf_state_peer_from_pfsync(&sp
->src
, &s
->src
);
1476 pf_state_peer_from_pfsync(&sp
->dst
, &s
->dst
);
1478 s
->rule
.ptr
= &pf_default_rule
;
1479 s
->nat_rule
.ptr
= NULL
;
1480 s
->anchor
.ptr
= NULL
;
1482 s
->creation
= pf_time_second();
1483 s
->expire
= pf_time_second();
1485 s
->expire
-= pf_default_rule
.timeout
[sp
->timeout
] - sp
->expire
;
1487 s
->packets
[0] = s
->packets
[1] = 0;
1488 s
->bytes
[0] = s
->bytes
[1] = 0;
1492 pf_pooladdr_copyin(struct pf_pooladdr
*src
, struct pf_pooladdr
*dst
)
1494 bcopy(src
, dst
, sizeof (struct pf_pooladdr
));
1496 dst
->entries
.tqe_prev
= NULL
;
1497 dst
->entries
.tqe_next
= NULL
;
1498 dst
->ifname
[sizeof (dst
->ifname
) - 1] = '\0';
1503 pf_pooladdr_copyout(struct pf_pooladdr
*src
, struct pf_pooladdr
*dst
)
1505 bcopy(src
, dst
, sizeof (struct pf_pooladdr
));
1507 dst
->entries
.tqe_prev
= NULL
;
1508 dst
->entries
.tqe_next
= NULL
;
1513 pf_setup_pfsync_matching(struct pf_ruleset
*rs
)
1516 struct pf_rule
*rule
;
1518 u_int8_t digest
[PF_MD5_DIGEST_LENGTH
];
1521 for (rs_cnt
= 0; rs_cnt
< PF_RULESET_MAX
; rs_cnt
++) {
1522 /* XXX PF_RULESET_SCRUB as well? */
1523 if (rs_cnt
== PF_RULESET_SCRUB
)
1526 if (rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1527 _FREE(rs
->rules
[rs_cnt
].inactive
.ptr_array
, M_TEMP
);
1528 rs
->rules
[rs_cnt
].inactive
.ptr_array
= NULL
;
1530 if (rs
->rules
[rs_cnt
].inactive
.rcount
) {
1531 rs
->rules
[rs_cnt
].inactive
.ptr_array
=
1532 _MALLOC(sizeof (caddr_t
) *
1533 rs
->rules
[rs_cnt
].inactive
.rcount
,
1536 if (!rs
->rules
[rs_cnt
].inactive
.ptr_array
)
1540 TAILQ_FOREACH(rule
, rs
->rules
[rs_cnt
].inactive
.ptr
,
1542 pf_hash_rule(&ctx
, rule
);
1543 (rs
->rules
[rs_cnt
].inactive
.ptr_array
)[rule
->nr
] = rule
;
1547 MD5Final(digest
, &ctx
);
1548 memcpy(pf_status
.pf_chksum
, digest
, sizeof (pf_status
.pf_chksum
));
1555 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1557 VERIFY(pf_is_enabled
== 0);
1560 pf_status
.running
= 1;
1561 pf_status
.since
= pf_calendar_time_second();
1562 if (pf_status
.stateid
== 0) {
1563 pf_status
.stateid
= pf_time_second();
1564 pf_status
.stateid
= pf_status
.stateid
<< 32;
1566 wakeup(pf_purge_thread_fn
);
1567 DPFPRINTF(PF_DEBUG_MISC
, ("pf: started\n"));
1573 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1575 VERIFY(pf_is_enabled
);
1577 pf_status
.running
= 0;
1579 pf_status
.since
= pf_calendar_time_second();
1580 wakeup(pf_purge_thread_fn
);
1581 DPFPRINTF(PF_DEBUG_MISC
, ("pf: stopped\n"));
1585 pfioctl(dev_t dev
, u_long cmd
, caddr_t addr
, int flags
, struct proc
*p
)
1588 int p64
= proc_is64bit(p
);
1590 int minordev
= minor(dev
);
1592 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1595 /* XXX keep in sync with switch() below */
1596 if (securelevel
> 1)
1603 case DIOCSETSTATUSIF
:
1609 case DIOCINSERTRULE
:
1610 case DIOCDELETERULE
:
1611 case DIOCGETTIMEOUT
:
1612 case DIOCCLRRULECTRS
:
1617 case DIOCGETRULESETS
:
1618 case DIOCGETRULESET
:
1619 case DIOCRGETTABLES
:
1620 case DIOCRGETTSTATS
:
1621 case DIOCRCLRTSTATS
:
1627 case DIOCRGETASTATS
:
1628 case DIOCRCLRASTATS
:
1631 case DIOCGETSRCNODES
:
1632 case DIOCCLRSRCNODES
:
1633 case DIOCIGETIFACES
:
1638 case DIOCRCLRTABLES
:
1639 case DIOCRADDTABLES
:
1640 case DIOCRDELTABLES
:
1641 case DIOCRSETTFLAGS
: {
1644 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1645 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1647 if (pfrio_flags
& PFR_FLAG_DUMMY
)
1648 break; /* dummy operation ok */
1655 if (!(flags
& FWRITE
))
1661 case DIOCGETSTARTERS
:
1668 case DIOCINSERTRULE
:
1669 case DIOCDELETERULE
:
1670 case DIOCGETTIMEOUT
:
1675 case DIOCGETRULESETS
:
1676 case DIOCGETRULESET
:
1678 case DIOCRGETTABLES
:
1679 case DIOCRGETTSTATS
:
1681 case DIOCRGETASTATS
:
1684 case DIOCGETSRCNODES
:
1685 case DIOCIGETIFACES
:
1688 case DIOCRCLRTABLES
:
1689 case DIOCRADDTABLES
:
1690 case DIOCRDELTABLES
:
1691 case DIOCRCLRTSTATS
:
1696 case DIOCRSETTFLAGS
: {
1699 bcopy(&((struct pfioc_table
*)(void *)addr
)->
1700 pfrio_flags
, &pfrio_flags
, sizeof (pfrio_flags
));
1702 if (pfrio_flags
& PFR_FLAG_DUMMY
) {
1703 flags
|= FWRITE
; /* need write lock for dummy */
1704 break; /* dummy operation ok */
1711 bcopy(&((struct pfioc_rule
*)(void *)addr
)->action
,
1712 &action
, sizeof (action
));
1714 if (action
== PF_GET_CLR_CNTR
)
1729 case DIOCCHANGEALTQ
:
1731 /* fail if ALTQ is disabled */
1736 #endif /* PF_ALTQ */
1739 lck_rw_lock_exclusive(pf_perim_lock
);
1741 lck_rw_lock_shared(pf_perim_lock
);
1743 lck_mtx_lock(pf_lock
);
1748 if (pf_status
.running
) {
1750 * Increment the reference for a simple -e enable, so
1751 * that even if other processes drop their references,
1752 * pf will still be available to processes that turned
1753 * it on without taking a reference
1755 if (nr_tokens
== pf_enabled_ref_count
) {
1756 pf_enabled_ref_count
++;
1757 VERIFY(pf_enabled_ref_count
!= 0);
1760 } else if (pf_purge_thread
== NULL
) {
1764 pf_enabled_ref_count
++;
1765 VERIFY(pf_enabled_ref_count
!= 0);
1769 case DIOCSTARTREF
: /* u_int64_t */
1770 if (pf_purge_thread
== NULL
) {
1775 /* small enough to be on stack */
1776 if ((token
= generate_token(p
)) != 0) {
1777 if (pf_is_enabled
== 0) {
1780 pf_enabled_ref_count
++;
1781 VERIFY(pf_enabled_ref_count
!= 0);
1784 DPFPRINTF(PF_DEBUG_URGENT
,
1785 ("pf: unable to generate token\n"));
1787 bcopy(&token
, addr
, sizeof (token
));
1792 if (!pf_status
.running
) {
1796 pf_enabled_ref_count
= 0;
1797 invalidate_all_tokens();
1801 case DIOCSTOPREF
: /* struct pfioc_remove_token */
1802 if (!pf_status
.running
) {
1805 struct pfioc_remove_token pfrt
;
1807 /* small enough to be on stack */
1808 bcopy(addr
, &pfrt
, sizeof (pfrt
));
1809 if ((error
= remove_token(&pfrt
)) == 0) {
1810 VERIFY(pf_enabled_ref_count
!= 0);
1811 pf_enabled_ref_count
--;
1812 /* return currently held references */
1813 pfrt
.refcount
= pf_enabled_ref_count
;
1814 DPFPRINTF(PF_DEBUG_MISC
,
1815 ("pf: enabled refcount decremented\n"));
1818 DPFPRINTF(PF_DEBUG_URGENT
,
1819 ("pf: token mismatch\n"));
1821 bcopy(&pfrt
, addr
, sizeof (pfrt
));
1823 if (error
== 0 && pf_enabled_ref_count
== 0)
1828 case DIOCGETSTARTERS
: { /* struct pfioc_tokens */
1829 PFIOCX_STRUCT_DECL(pfioc_tokens
);
1831 PFIOCX_STRUCT_BEGIN(addr
, pfioc_tokens
, error
= ENOMEM
; break;);
1832 error
= pfioctl_ioc_tokens(cmd
,
1833 PFIOCX_STRUCT_ADDR32(pfioc_tokens
),
1834 PFIOCX_STRUCT_ADDR64(pfioc_tokens
), p
);
1835 PFIOCX_STRUCT_END(pfioc_tokens
, addr
);
1839 case DIOCADDRULE
: /* struct pfioc_rule */
1840 case DIOCGETRULES
: /* struct pfioc_rule */
1841 case DIOCGETRULE
: /* struct pfioc_rule */
1842 case DIOCCHANGERULE
: /* struct pfioc_rule */
1843 case DIOCINSERTRULE
: /* struct pfioc_rule */
1844 case DIOCDELETERULE
: { /* struct pfioc_rule */
1845 struct pfioc_rule
*pr
= NULL
;
1847 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
1848 error
= pfioctl_ioc_rule(cmd
, minordev
, pr
, p
);
1849 PFIOC_STRUCT_END(pr
, addr
);
1853 case DIOCCLRSTATES
: /* struct pfioc_state_kill */
1854 case DIOCKILLSTATES
: { /* struct pfioc_state_kill */
1855 struct pfioc_state_kill
*psk
= NULL
;
1857 PFIOC_STRUCT_BEGIN(addr
, psk
, error
= ENOMEM
; break;);
1858 error
= pfioctl_ioc_state_kill(cmd
, psk
, p
);
1859 PFIOC_STRUCT_END(psk
, addr
);
1863 case DIOCADDSTATE
: /* struct pfioc_state */
1864 case DIOCGETSTATE
: { /* struct pfioc_state */
1865 struct pfioc_state
*ps
= NULL
;
1867 PFIOC_STRUCT_BEGIN(addr
, ps
, error
= ENOMEM
; break;);
1868 error
= pfioctl_ioc_state(cmd
, ps
, p
);
1869 PFIOC_STRUCT_END(ps
, addr
);
1873 case DIOCGETSTATES
: { /* struct pfioc_states */
1874 PFIOCX_STRUCT_DECL(pfioc_states
);
1876 PFIOCX_STRUCT_BEGIN(addr
, pfioc_states
, error
= ENOMEM
; break;);
1877 error
= pfioctl_ioc_states(cmd
,
1878 PFIOCX_STRUCT_ADDR32(pfioc_states
),
1879 PFIOCX_STRUCT_ADDR64(pfioc_states
), p
);
1880 PFIOCX_STRUCT_END(pfioc_states
, addr
);
1884 case DIOCGETSTATUS
: { /* struct pf_status */
1885 struct pf_status
*s
= NULL
;
1887 PFIOC_STRUCT_BEGIN(&pf_status
, s
, error
= ENOMEM
; break;);
1888 pfi_update_status(s
->ifname
, s
);
1889 PFIOC_STRUCT_END(s
, addr
);
1893 case DIOCSETSTATUSIF
: { /* struct pfioc_if */
1894 struct pfioc_if
*pi
= (struct pfioc_if
*)(void *)addr
;
1896 /* OK for unaligned accesses */
1897 if (pi
->ifname
[0] == 0) {
1898 bzero(pf_status
.ifname
, IFNAMSIZ
);
1901 strlcpy(pf_status
.ifname
, pi
->ifname
, IFNAMSIZ
);
1905 case DIOCCLRSTATUS
: {
1906 bzero(pf_status
.counters
, sizeof (pf_status
.counters
));
1907 bzero(pf_status
.fcounters
, sizeof (pf_status
.fcounters
));
1908 bzero(pf_status
.scounters
, sizeof (pf_status
.scounters
));
1909 pf_status
.since
= pf_calendar_time_second();
1910 if (*pf_status
.ifname
)
1911 pfi_update_status(pf_status
.ifname
, NULL
);
1915 case DIOCNATLOOK
: { /* struct pfioc_natlook */
1916 struct pfioc_natlook
*pnl
= NULL
;
1918 PFIOC_STRUCT_BEGIN(addr
, pnl
, error
= ENOMEM
; break;);
1919 error
= pfioctl_ioc_natlook(cmd
, pnl
, p
);
1920 PFIOC_STRUCT_END(pnl
, addr
);
1924 case DIOCSETTIMEOUT
: /* struct pfioc_tm */
1925 case DIOCGETTIMEOUT
: { /* struct pfioc_tm */
1928 /* small enough to be on stack */
1929 bcopy(addr
, &pt
, sizeof (pt
));
1930 error
= pfioctl_ioc_tm(cmd
, &pt
, p
);
1931 bcopy(&pt
, addr
, sizeof (pt
));
1935 case DIOCGETLIMIT
: /* struct pfioc_limit */
1936 case DIOCSETLIMIT
: { /* struct pfioc_limit */
1937 struct pfioc_limit pl
;
1939 /* small enough to be on stack */
1940 bcopy(addr
, &pl
, sizeof (pl
));
1941 error
= pfioctl_ioc_limit(cmd
, &pl
, p
);
1942 bcopy(&pl
, addr
, sizeof (pl
));
1946 case DIOCSETDEBUG
: { /* u_int32_t */
1947 bcopy(addr
, &pf_status
.debug
, sizeof (u_int32_t
));
1951 case DIOCCLRRULECTRS
: {
1952 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1953 struct pf_ruleset
*ruleset
= &pf_main_ruleset
;
1954 struct pf_rule
*rule
;
1957 ruleset
->rules
[PF_RULESET_FILTER
].active
.ptr
, entries
) {
1958 rule
->evaluations
= 0;
1959 rule
->packets
[0] = rule
->packets
[1] = 0;
1960 rule
->bytes
[0] = rule
->bytes
[1] = 0;
1965 case DIOCGIFSPEED
: {
1966 struct pf_ifspeed
*psp
= (struct pf_ifspeed
*)(void *)addr
;
1967 struct pf_ifspeed ps
;
1971 if (psp
->ifname
[0] != '\0') {
1972 /* Can we completely trust user-land? */
1973 strlcpy(ps
.ifname
, psp
->ifname
, IFNAMSIZ
);
1974 ps
.ifname
[IFNAMSIZ
- 1] = '\0';
1975 ifp
= ifunit(ps
.ifname
);
1977 baudrate
= ifp
->if_output_bw
.max_bw
;
1978 bcopy(&baudrate
, &psp
->baudrate
,
1990 case DIOCSTARTALTQ
: {
1991 struct pf_altq
*altq
;
1993 VERIFY(altq_allowed
);
1994 /* enable all altq interfaces on active list */
1995 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
1996 if (altq
->qname
[0] == '\0') {
1997 error
= pf_enable_altq(altq
);
2003 pf_altq_running
= 1;
2004 DPFPRINTF(PF_DEBUG_MISC
, ("altq: started\n"));
2008 case DIOCSTOPALTQ
: {
2009 struct pf_altq
*altq
;
2011 VERIFY(altq_allowed
);
2012 /* disable all altq interfaces on active list */
2013 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
) {
2014 if (altq
->qname
[0] == '\0') {
2015 error
= pf_disable_altq(altq
);
2021 pf_altq_running
= 0;
2022 DPFPRINTF(PF_DEBUG_MISC
, ("altq: stopped\n"));
2026 case DIOCADDALTQ
: { /* struct pfioc_altq */
2027 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2028 struct pf_altq
*altq
, *a
;
2031 VERIFY(altq_allowed
);
2032 bcopy(&pa
->ticket
, &ticket
, sizeof (ticket
));
2033 if (ticket
!= ticket_altqs_inactive
) {
2037 altq
= pool_get(&pf_altq_pl
, PR_WAITOK
);
2042 pf_altq_copyin(&pa
->altq
, altq
);
2045 * if this is for a queue, find the discipline and
2046 * copy the necessary fields
2048 if (altq
->qname
[0] != '\0') {
2049 if ((altq
->qid
= pf_qname2qid(altq
->qname
)) == 0) {
2051 pool_put(&pf_altq_pl
, altq
);
2054 altq
->altq_disc
= NULL
;
2055 TAILQ_FOREACH(a
, pf_altqs_inactive
, entries
) {
2056 if (strncmp(a
->ifname
, altq
->ifname
,
2057 IFNAMSIZ
) == 0 && a
->qname
[0] == '\0') {
2058 altq
->altq_disc
= a
->altq_disc
;
2064 error
= altq_add(altq
);
2066 pool_put(&pf_altq_pl
, altq
);
2070 TAILQ_INSERT_TAIL(pf_altqs_inactive
, altq
, entries
);
2071 pf_altq_copyout(altq
, &pa
->altq
);
2075 case DIOCGETALTQS
: {
2076 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2077 struct pf_altq
*altq
;
2080 VERIFY(altq_allowed
);
2082 TAILQ_FOREACH(altq
, pf_altqs_active
, entries
)
2084 bcopy(&nr
, &pa
->nr
, sizeof (nr
));
2085 bcopy(&ticket_altqs_active
, &pa
->ticket
, sizeof (pa
->ticket
));
2090 struct pfioc_altq
*pa
= (struct pfioc_altq
*)(void *)addr
;
2091 struct pf_altq
*altq
;
2092 u_int32_t nr
, pa_nr
, ticket
;
2094 VERIFY(altq_allowed
);
2095 bcopy(&pa
->ticket
, &ticket
, sizeof (ticket
));
2096 if (ticket
!= ticket_altqs_active
) {
2100 bcopy(&pa
->nr
, &pa_nr
, sizeof (pa_nr
));
2102 altq
= TAILQ_FIRST(pf_altqs_active
);
2103 while ((altq
!= NULL
) && (nr
< pa_nr
)) {
2104 altq
= TAILQ_NEXT(altq
, entries
);
2111 pf_altq_copyout(altq
, &pa
->altq
);
2115 case DIOCCHANGEALTQ
:
2116 VERIFY(altq_allowed
);
2117 /* CHANGEALTQ not supported yet! */
2121 case DIOCGETQSTATS
: {
2122 struct pfioc_qstats
*pq
= (struct pfioc_qstats
*)(void *)addr
;
2123 struct pf_altq
*altq
;
2124 u_int32_t nr
, pq_nr
, ticket
;
2127 VERIFY(altq_allowed
);
2128 bcopy(&pq
->ticket
, &ticket
, sizeof (ticket
));
2129 if (ticket
!= ticket_altqs_active
) {
2133 bcopy(&pq
->nr
, &pq_nr
, sizeof (pq_nr
));
2135 altq
= TAILQ_FIRST(pf_altqs_active
);
2136 while ((altq
!= NULL
) && (nr
< pq_nr
)) {
2137 altq
= TAILQ_NEXT(altq
, entries
);
2144 bcopy(&pq
->nbytes
, &nbytes
, sizeof (nbytes
));
2145 error
= altq_getqstats(altq
, pq
->buf
, &nbytes
);
2147 pq
->scheduler
= altq
->scheduler
;
2148 bcopy(&nbytes
, &pq
->nbytes
, sizeof (nbytes
));
2152 #endif /* PF_ALTQ */
2154 case DIOCBEGINADDRS
: /* struct pfioc_pooladdr */
2155 case DIOCADDADDR
: /* struct pfioc_pooladdr */
2156 case DIOCGETADDRS
: /* struct pfioc_pooladdr */
2157 case DIOCGETADDR
: /* struct pfioc_pooladdr */
2158 case DIOCCHANGEADDR
: { /* struct pfioc_pooladdr */
2159 struct pfioc_pooladdr
*pp
= NULL
;
2161 PFIOC_STRUCT_BEGIN(addr
, pp
, error
= ENOMEM
; break;)
2162 error
= pfioctl_ioc_pooladdr(cmd
, pp
, p
);
2163 PFIOC_STRUCT_END(pp
, addr
);
2167 case DIOCGETRULESETS
: /* struct pfioc_ruleset */
2168 case DIOCGETRULESET
: { /* struct pfioc_ruleset */
2169 struct pfioc_ruleset
*pr
= NULL
;
2171 PFIOC_STRUCT_BEGIN(addr
, pr
, error
= ENOMEM
; break;);
2172 error
= pfioctl_ioc_ruleset(cmd
, pr
, p
);
2173 PFIOC_STRUCT_END(pr
, addr
);
2177 case DIOCRCLRTABLES
: /* struct pfioc_table */
2178 case DIOCRADDTABLES
: /* struct pfioc_table */
2179 case DIOCRDELTABLES
: /* struct pfioc_table */
2180 case DIOCRGETTABLES
: /* struct pfioc_table */
2181 case DIOCRGETTSTATS
: /* struct pfioc_table */
2182 case DIOCRCLRTSTATS
: /* struct pfioc_table */
2183 case DIOCRSETTFLAGS
: /* struct pfioc_table */
2184 case DIOCRCLRADDRS
: /* struct pfioc_table */
2185 case DIOCRADDADDRS
: /* struct pfioc_table */
2186 case DIOCRDELADDRS
: /* struct pfioc_table */
2187 case DIOCRSETADDRS
: /* struct pfioc_table */
2188 case DIOCRGETADDRS
: /* struct pfioc_table */
2189 case DIOCRGETASTATS
: /* struct pfioc_table */
2190 case DIOCRCLRASTATS
: /* struct pfioc_table */
2191 case DIOCRTSTADDRS
: /* struct pfioc_table */
2192 case DIOCRINADEFINE
: { /* struct pfioc_table */
2193 PFIOCX_STRUCT_DECL(pfioc_table
);
2195 PFIOCX_STRUCT_BEGIN(addr
, pfioc_table
, error
= ENOMEM
; break;);
2196 error
= pfioctl_ioc_table(cmd
,
2197 PFIOCX_STRUCT_ADDR32(pfioc_table
),
2198 PFIOCX_STRUCT_ADDR64(pfioc_table
), p
);
2199 PFIOCX_STRUCT_END(pfioc_table
, addr
);
2203 case DIOCOSFPADD
: /* struct pf_osfp_ioctl */
2204 case DIOCOSFPGET
: { /* struct pf_osfp_ioctl */
2205 struct pf_osfp_ioctl
*io
= NULL
;
2207 PFIOC_STRUCT_BEGIN(addr
, io
, error
= ENOMEM
; break;);
2208 if (cmd
== DIOCOSFPADD
) {
2209 error
= pf_osfp_add(io
);
2211 VERIFY(cmd
== DIOCOSFPGET
);
2212 error
= pf_osfp_get(io
);
2214 PFIOC_STRUCT_END(io
, addr
);
2218 case DIOCXBEGIN
: /* struct pfioc_trans */
2219 case DIOCXROLLBACK
: /* struct pfioc_trans */
2220 case DIOCXCOMMIT
: { /* struct pfioc_trans */
2221 PFIOCX_STRUCT_DECL(pfioc_trans
);
2223 PFIOCX_STRUCT_BEGIN(addr
, pfioc_trans
, error
= ENOMEM
; break;);
2224 error
= pfioctl_ioc_trans(cmd
,
2225 PFIOCX_STRUCT_ADDR32(pfioc_trans
),
2226 PFIOCX_STRUCT_ADDR64(pfioc_trans
), p
);
2227 PFIOCX_STRUCT_END(pfioc_trans
, addr
);
2231 case DIOCGETSRCNODES
: { /* struct pfioc_src_nodes */
2232 PFIOCX_STRUCT_DECL(pfioc_src_nodes
);
2234 PFIOCX_STRUCT_BEGIN(addr
, pfioc_src_nodes
,
2235 error
= ENOMEM
; break;);
2236 error
= pfioctl_ioc_src_nodes(cmd
,
2237 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes
),
2238 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes
), p
);
2239 PFIOCX_STRUCT_END(pfioc_src_nodes
, addr
);
2243 case DIOCCLRSRCNODES
: {
2244 struct pf_src_node
*n
;
2245 struct pf_state
*state
;
2247 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) {
2248 state
->src_node
= NULL
;
2249 state
->nat_src_node
= NULL
;
2251 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
2255 pf_purge_expired_src_nodes();
2256 pf_status
.src_nodes
= 0;
2260 case DIOCKILLSRCNODES
: { /* struct pfioc_src_node_kill */
2261 struct pfioc_src_node_kill
*psnk
= NULL
;
2263 PFIOC_STRUCT_BEGIN(addr
, psnk
, error
= ENOMEM
; break;);
2264 error
= pfioctl_ioc_src_node_kill(cmd
, psnk
, p
);
2265 PFIOC_STRUCT_END(psnk
, addr
);
2269 case DIOCSETHOSTID
: { /* u_int32_t */
2272 /* small enough to be on stack */
2273 bcopy(addr
, &hid
, sizeof (hid
));
2275 pf_status
.hostid
= random();
2277 pf_status
.hostid
= hid
;
2285 case DIOCIGETIFACES
: /* struct pfioc_iface */
2286 case DIOCSETIFFLAG
: /* struct pfioc_iface */
2287 case DIOCCLRIFFLAG
: { /* struct pfioc_iface */
2288 PFIOCX_STRUCT_DECL(pfioc_iface
);
2290 PFIOCX_STRUCT_BEGIN(addr
, pfioc_iface
, error
= ENOMEM
; break;);
2291 error
= pfioctl_ioc_iface(cmd
,
2292 PFIOCX_STRUCT_ADDR32(pfioc_iface
),
2293 PFIOCX_STRUCT_ADDR64(pfioc_iface
), p
);
2294 PFIOCX_STRUCT_END(pfioc_iface
, addr
);
2303 lck_mtx_unlock(pf_lock
);
2304 lck_rw_done(pf_perim_lock
);
2310 pfioctl_ioc_table(u_long cmd
, struct pfioc_table_32
*io32
,
2311 struct pfioc_table_64
*io64
, struct proc
*p
)
2313 int p64
= proc_is64bit(p
);
2320 * 64-bit structure processing
2323 case DIOCRCLRTABLES
:
2324 if (io64
->pfrio_esize
!= 0) {
2328 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2329 error
= pfr_clr_tables(&io64
->pfrio_table
, &io64
->pfrio_ndel
,
2330 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2333 case DIOCRADDTABLES
:
2334 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2338 error
= pfr_add_tables(io64
->pfrio_buffer
, io64
->pfrio_size
,
2339 &io64
->pfrio_nadd
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2342 case DIOCRDELTABLES
:
2343 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2347 error
= pfr_del_tables(io64
->pfrio_buffer
, io64
->pfrio_size
,
2348 &io64
->pfrio_ndel
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2351 case DIOCRGETTABLES
:
2352 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2356 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2357 error
= pfr_get_tables(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2358 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2361 case DIOCRGETTSTATS
:
2362 if (io64
->pfrio_esize
!= sizeof (struct pfr_tstats
)) {
2366 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2367 error
= pfr_get_tstats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2368 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2371 case DIOCRCLRTSTATS
:
2372 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2376 error
= pfr_clr_tstats(io64
->pfrio_buffer
, io64
->pfrio_size
,
2377 &io64
->pfrio_nzero
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2380 case DIOCRSETTFLAGS
:
2381 if (io64
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2385 error
= pfr_set_tflags(io64
->pfrio_buffer
, io64
->pfrio_size
,
2386 io64
->pfrio_setflag
, io64
->pfrio_clrflag
,
2387 &io64
->pfrio_nchange
, &io64
->pfrio_ndel
,
2388 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2392 if (io64
->pfrio_esize
!= 0) {
2396 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2397 error
= pfr_clr_addrs(&io64
->pfrio_table
, &io64
->pfrio_ndel
,
2398 io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2402 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2406 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2407 error
= pfr_add_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2408 io64
->pfrio_size
, &io64
->pfrio_nadd
, io64
->pfrio_flags
|
2409 PFR_FLAG_USERIOCTL
);
2413 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2417 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2418 error
= pfr_del_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2419 io64
->pfrio_size
, &io64
->pfrio_ndel
, io64
->pfrio_flags
|
2420 PFR_FLAG_USERIOCTL
);
2424 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2428 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2429 error
= pfr_set_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2430 io64
->pfrio_size
, &io64
->pfrio_size2
, &io64
->pfrio_nadd
,
2431 &io64
->pfrio_ndel
, &io64
->pfrio_nchange
, io64
->pfrio_flags
|
2432 PFR_FLAG_USERIOCTL
, 0);
2436 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2440 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2441 error
= pfr_get_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2442 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2445 case DIOCRGETASTATS
:
2446 if (io64
->pfrio_esize
!= sizeof (struct pfr_astats
)) {
2450 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2451 error
= pfr_get_astats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2452 &io64
->pfrio_size
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2455 case DIOCRCLRASTATS
:
2456 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2460 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2461 error
= pfr_clr_astats(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2462 io64
->pfrio_size
, &io64
->pfrio_nzero
, io64
->pfrio_flags
|
2463 PFR_FLAG_USERIOCTL
);
2467 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2471 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2472 error
= pfr_tst_addrs(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2473 io64
->pfrio_size
, &io64
->pfrio_nmatch
, io64
->pfrio_flags
|
2474 PFR_FLAG_USERIOCTL
);
2477 case DIOCRINADEFINE
:
2478 if (io64
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2482 pfr_table_copyin_cleanup(&io64
->pfrio_table
);
2483 error
= pfr_ina_define(&io64
->pfrio_table
, io64
->pfrio_buffer
,
2484 io64
->pfrio_size
, &io64
->pfrio_nadd
, &io64
->pfrio_naddr
,
2485 io64
->pfrio_ticket
, io64
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2496 * 32-bit structure processing
2499 case DIOCRCLRTABLES
:
2500 if (io32
->pfrio_esize
!= 0) {
2504 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2505 error
= pfr_clr_tables(&io32
->pfrio_table
, &io32
->pfrio_ndel
,
2506 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2509 case DIOCRADDTABLES
:
2510 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2514 error
= pfr_add_tables(io32
->pfrio_buffer
, io32
->pfrio_size
,
2515 &io32
->pfrio_nadd
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2518 case DIOCRDELTABLES
:
2519 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2523 error
= pfr_del_tables(io32
->pfrio_buffer
, io32
->pfrio_size
,
2524 &io32
->pfrio_ndel
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2527 case DIOCRGETTABLES
:
2528 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2532 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2533 error
= pfr_get_tables(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2534 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2537 case DIOCRGETTSTATS
:
2538 if (io32
->pfrio_esize
!= sizeof (struct pfr_tstats
)) {
2542 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2543 error
= pfr_get_tstats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2544 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2547 case DIOCRCLRTSTATS
:
2548 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2552 error
= pfr_clr_tstats(io32
->pfrio_buffer
, io32
->pfrio_size
,
2553 &io32
->pfrio_nzero
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2556 case DIOCRSETTFLAGS
:
2557 if (io32
->pfrio_esize
!= sizeof (struct pfr_table
)) {
2561 error
= pfr_set_tflags(io32
->pfrio_buffer
, io32
->pfrio_size
,
2562 io32
->pfrio_setflag
, io32
->pfrio_clrflag
,
2563 &io32
->pfrio_nchange
, &io32
->pfrio_ndel
,
2564 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2568 if (io32
->pfrio_esize
!= 0) {
2572 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2573 error
= pfr_clr_addrs(&io32
->pfrio_table
, &io32
->pfrio_ndel
,
2574 io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2578 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2582 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2583 error
= pfr_add_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2584 io32
->pfrio_size
, &io32
->pfrio_nadd
, io32
->pfrio_flags
|
2585 PFR_FLAG_USERIOCTL
);
2589 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2593 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2594 error
= pfr_del_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2595 io32
->pfrio_size
, &io32
->pfrio_ndel
, io32
->pfrio_flags
|
2596 PFR_FLAG_USERIOCTL
);
2600 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2604 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2605 error
= pfr_set_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2606 io32
->pfrio_size
, &io32
->pfrio_size2
, &io32
->pfrio_nadd
,
2607 &io32
->pfrio_ndel
, &io32
->pfrio_nchange
, io32
->pfrio_flags
|
2608 PFR_FLAG_USERIOCTL
, 0);
2612 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2616 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2617 error
= pfr_get_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2618 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2621 case DIOCRGETASTATS
:
2622 if (io32
->pfrio_esize
!= sizeof (struct pfr_astats
)) {
2626 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2627 error
= pfr_get_astats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2628 &io32
->pfrio_size
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2631 case DIOCRCLRASTATS
:
2632 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2636 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2637 error
= pfr_clr_astats(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2638 io32
->pfrio_size
, &io32
->pfrio_nzero
, io32
->pfrio_flags
|
2639 PFR_FLAG_USERIOCTL
);
2643 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2647 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2648 error
= pfr_tst_addrs(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2649 io32
->pfrio_size
, &io32
->pfrio_nmatch
, io32
->pfrio_flags
|
2650 PFR_FLAG_USERIOCTL
);
2653 case DIOCRINADEFINE
:
2654 if (io32
->pfrio_esize
!= sizeof (struct pfr_addr
)) {
2658 pfr_table_copyin_cleanup(&io32
->pfrio_table
);
2659 error
= pfr_ina_define(&io32
->pfrio_table
, io32
->pfrio_buffer
,
2660 io32
->pfrio_size
, &io32
->pfrio_nadd
, &io32
->pfrio_naddr
,
2661 io32
->pfrio_ticket
, io32
->pfrio_flags
| PFR_FLAG_USERIOCTL
);
2674 pfioctl_ioc_tokens(u_long cmd
, struct pfioc_tokens_32
*tok32
,
2675 struct pfioc_tokens_64
*tok64
, struct proc
*p
)
2677 struct pfioc_token
*tokens
;
2678 struct pfioc_kernel_token
*entry
, *tmp
;
2679 user_addr_t token_buf
;
2680 int ocnt
, cnt
, error
= 0, p64
= proc_is64bit(p
);
2684 case DIOCGETSTARTERS
: {
2687 if (nr_tokens
== 0) {
2692 size
= sizeof (struct pfioc_token
) * nr_tokens
;
2693 ocnt
= cnt
= (p64
? tok64
->size
: tok32
->size
);
2702 token_buf
= (p64
? tok64
->pgt_buf
: tok32
->pgt_buf
);
2703 tokens
= _MALLOC(size
, M_TEMP
, M_WAITOK
|M_ZERO
);
2704 if (tokens
== NULL
) {
2709 ptr
= (void *)tokens
;
2710 SLIST_FOREACH_SAFE(entry
, &token_list_head
, next
, tmp
) {
2711 struct pfioc_token
*t
;
2713 if ((unsigned)cnt
< sizeof (*tokens
))
2714 break; /* no more buffer space left */
2716 t
= (struct pfioc_token
*)(void *)ptr
;
2717 t
->token_value
= entry
->token
.token_value
;
2718 t
->timestamp
= entry
->token
.timestamp
;
2719 t
->pid
= entry
->token
.pid
;
2720 bcopy(entry
->token
.proc_name
, t
->proc_name
,
2721 PFTOK_PROCNAME_LEN
);
2722 ptr
+= sizeof (struct pfioc_token
);
2724 cnt
-= sizeof (struct pfioc_token
);
2728 error
= copyout(tokens
, token_buf
, ocnt
- cnt
);
2731 tok64
->size
= ocnt
- cnt
;
2733 tok32
->size
= ocnt
- cnt
;
2735 _FREE(tokens
, M_TEMP
);
2748 pf_expire_states_and_src_nodes(struct pf_rule
*rule
)
2750 struct pf_state
*state
;
2751 struct pf_src_node
*sn
;
2754 /* expire the states */
2755 state
= TAILQ_FIRST(&state_list
);
2757 if (state
->rule
.ptr
== rule
)
2758 state
->timeout
= PFTM_PURGE
;
2759 state
= TAILQ_NEXT(state
, entry_list
);
2761 pf_purge_expired_states(pf_status
.states
);
2763 /* expire the src_nodes */
2764 RB_FOREACH(sn
, pf_src_tree
, &tree_src_tracking
) {
2765 if (sn
->rule
.ptr
!= rule
)
2767 if (sn
->states
!= 0) {
2768 RB_FOREACH(state
, pf_state_tree_id
,
2770 if (state
->src_node
== sn
)
2771 state
->src_node
= NULL
;
2772 if (state
->nat_src_node
== sn
)
2773 state
->nat_src_node
= NULL
;
2781 pf_purge_expired_src_nodes();
2785 pf_delete_rule_from_ruleset(struct pf_ruleset
*ruleset
, int rs_num
,
2786 struct pf_rule
*rule
)
2791 pf_expire_states_and_src_nodes(rule
);
2793 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, rule
);
2794 if (ruleset
->rules
[rs_num
].active
.rcount
-- == 0)
2795 panic("%s: rcount value broken!", __func__
);
2796 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
2800 r
= TAILQ_NEXT(r
, entries
);
2806 pf_ruleset_cleanup(struct pf_ruleset
*ruleset
, int rs
)
2808 pf_calc_skip_steps(ruleset
->rules
[rs
].active
.ptr
);
2809 ruleset
->rules
[rs
].active
.ticket
=
2810 ++ruleset
->rules
[rs
].inactive
.ticket
;
2814 pf_delete_rule_by_ticket(struct pfioc_rule
*pr
)
2816 struct pf_ruleset
*ruleset
;
2817 struct pf_rule
*rule
;
2822 is_anchor
= (pr
->anchor_call
[0] != '\0');
2823 if ((ruleset
= pf_find_ruleset_with_owner(pr
->anchor
,
2824 pr
->rule
.owner
, is_anchor
, &error
)) == NULL
)
2827 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
2828 if (rs_num
>= PF_RULESET_MAX
) {
2832 if (pr
->rule
.ticket
) {
2833 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
2834 while (rule
&& (rule
->ticket
!= pr
->rule
.ticket
))
2835 rule
= TAILQ_NEXT(rule
, entries
);
2839 if (strcmp(rule
->owner
, pr
->rule
.owner
))
2843 if (rule
->anchor
&& (ruleset
!= &pf_main_ruleset
) &&
2844 ((strcmp(ruleset
->anchor
->owner
, "")) == 0) &&
2845 ((ruleset
->rules
[rs_num
].active
.rcount
- 1) == 0)) {
2846 /* set rule & ruleset to parent and repeat */
2847 struct pf_rule
*delete_rule
= rule
;
2848 struct pf_ruleset
*delete_ruleset
= ruleset
;
2850 #define parent_ruleset ruleset->anchor->parent->ruleset
2851 if (ruleset
->anchor
->parent
== NULL
)
2852 ruleset
= &pf_main_ruleset
;
2854 ruleset
= &parent_ruleset
;
2856 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
2858 (rule
->anchor
!= delete_ruleset
->anchor
))
2859 rule
= TAILQ_NEXT(rule
, entries
);
2861 panic("%s: rule not found!", __func__
);
2863 if (delete_rule
->rule_flag
& PFRULE_PFM
)
2866 pf_delete_rule_from_ruleset(delete_ruleset
,
2867 rs_num
, delete_rule
);
2868 delete_ruleset
->rules
[rs_num
].active
.ticket
=
2869 ++delete_ruleset
->rules
[rs_num
].inactive
.ticket
;
2873 if (rule
->rule_flag
& PFRULE_PFM
)
2875 pf_delete_rule_from_ruleset(ruleset
, rs_num
,
2877 pf_ruleset_cleanup(ruleset
, rs_num
);
2885 pf_delete_rule_by_owner(char *owner
)
2887 struct pf_ruleset
*ruleset
;
2888 struct pf_rule
*rule
, *next
;
2891 for (int rs
= 0; rs
< PF_RULESET_MAX
; rs
++) {
2892 rule
= TAILQ_FIRST(pf_main_ruleset
.rules
[rs
].active
.ptr
);
2893 ruleset
= &pf_main_ruleset
;
2895 next
= TAILQ_NEXT(rule
, entries
);
2897 if (((strcmp(rule
->owner
, owner
)) == 0) ||
2898 ((strcmp(rule
->owner
, "")) == 0)) {
2899 if (rule
->anchor
->ruleset
.rules
[rs
].active
.rcount
> 0) {
2901 pf_ruleset_cleanup(ruleset
, rs
);
2904 /* step into anchor */
2906 &rule
->anchor
->ruleset
;
2907 rule
= TAILQ_FIRST(ruleset
->rules
[rs
].active
.ptr
);
2910 if (rule
->rule_flag
&
2913 pf_delete_rule_from_ruleset(ruleset
, rs
, rule
);
2920 if (((strcmp(rule
->owner
, owner
)) == 0)) {
2922 if (rule
->rule_flag
& PFRULE_PFM
)
2924 pf_delete_rule_from_ruleset(ruleset
,
2932 pf_ruleset_cleanup(ruleset
, rs
);
2935 if (ruleset
!= &pf_main_ruleset
)
2936 pf_deleterule_anchor_step_out(&ruleset
,
2944 pf_deleterule_anchor_step_out(struct pf_ruleset
**ruleset_ptr
,
2945 int rs
, struct pf_rule
**rule_ptr
)
2947 struct pf_ruleset
*ruleset
= *ruleset_ptr
;
2948 struct pf_rule
*rule
= *rule_ptr
;
2950 /* step out of anchor */
2951 struct pf_ruleset
*rs_copy
= ruleset
;
2952 ruleset
= ruleset
->anchor
->parent
?
2953 &ruleset
->anchor
->parent
->ruleset
:&pf_main_ruleset
;
2955 rule
= TAILQ_FIRST(ruleset
->rules
[rs
].active
.ptr
);
2956 while (rule
&& (rule
->anchor
!= rs_copy
->anchor
))
2957 rule
= TAILQ_NEXT(rule
, entries
);
2959 panic("%s: parent rule of anchor not found!", __func__
);
2960 if (rule
->anchor
->ruleset
.rules
[rs
].active
.rcount
> 0)
2961 rule
= TAILQ_NEXT(rule
, entries
);
2963 *ruleset_ptr
= ruleset
;
2968 pf_rule_setup(struct pfioc_rule
*pr
, struct pf_rule
*rule
,
2969 struct pf_ruleset
*ruleset
) {
2970 struct pf_pooladdr
*apa
;
2973 if (rule
->ifname
[0]) {
2974 rule
->kif
= pfi_kif_get(rule
->ifname
);
2975 if (rule
->kif
== NULL
) {
2976 pool_put(&pf_rule_pl
, rule
);
2979 pfi_kif_ref(rule
->kif
, PFI_KIF_REF_RULE
);
2983 if (altq_allowed
&& rule
->qname
[0] != '\0') {
2984 if ((rule
->qid
= pf_qname2qid(rule
->qname
)) == 0)
2986 else if (rule
->pqname
[0] != '\0') {
2988 pf_qname2qid(rule
->pqname
)) == 0)
2991 rule
->pqid
= rule
->qid
;
2993 #endif /* PF_ALTQ */
2994 if (rule
->tagname
[0])
2995 if ((rule
->tag
= pf_tagname2tag(rule
->tagname
)) == 0)
2997 if (rule
->match_tagname
[0])
2998 if ((rule
->match_tag
=
2999 pf_tagname2tag(rule
->match_tagname
)) == 0)
3001 if (rule
->rt
&& !rule
->direction
)
3006 if (rule
->logif
>= PFLOGIFS_MAX
)
3009 if (pf_rtlabel_add(&rule
->src
.addr
) ||
3010 pf_rtlabel_add(&rule
->dst
.addr
))
3012 if (pfi_dynaddr_setup(&rule
->src
.addr
, rule
->af
))
3014 if (pfi_dynaddr_setup(&rule
->dst
.addr
, rule
->af
))
3016 if (pf_tbladdr_setup(ruleset
, &rule
->src
.addr
))
3018 if (pf_tbladdr_setup(ruleset
, &rule
->dst
.addr
))
3020 if (pf_anchor_setup(rule
, ruleset
, pr
->anchor_call
))
3022 TAILQ_FOREACH(apa
, &pf_pabuf
, entries
)
3023 if (pf_tbladdr_setup(ruleset
, &apa
->addr
))
3026 if (rule
->overload_tblname
[0]) {
3027 if ((rule
->overload_tbl
= pfr_attach_table(ruleset
,
3028 rule
->overload_tblname
)) == NULL
)
3031 rule
->overload_tbl
->pfrkt_flags
|=
3035 pf_mv_pool(&pf_pabuf
, &rule
->rpool
.list
);
3036 if (((((rule
->action
== PF_NAT
) || (rule
->action
== PF_RDR
) ||
3037 (rule
->action
== PF_BINAT
)) && rule
->anchor
== NULL
) ||
3038 (rule
->rt
> PF_FASTROUTE
)) &&
3039 (TAILQ_FIRST(&rule
->rpool
.list
) == NULL
))
3043 pf_rm_rule(NULL
, rule
);
3046 rule
->rpool
.cur
= TAILQ_FIRST(&rule
->rpool
.list
);
3047 rule
->evaluations
= rule
->packets
[0] = rule
->packets
[1] =
3048 rule
->bytes
[0] = rule
->bytes
[1] = 0;
3054 pfioctl_ioc_rule(u_long cmd
, int minordev
, struct pfioc_rule
*pr
, struct proc
*p
)
3060 struct pf_ruleset
*ruleset
;
3061 struct pf_rule
*rule
, *tail
;
3064 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3065 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3066 ruleset
= pf_find_ruleset(pr
->anchor
);
3067 if (ruleset
== NULL
) {
3071 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
3072 if (rs_num
>= PF_RULESET_MAX
) {
3076 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3080 if (pr
->ticket
!= ruleset
->rules
[rs_num
].inactive
.ticket
) {
3084 if (pr
->pool_ticket
!= ticket_pabuf
) {
3088 rule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
3093 pf_rule_copyin(&pr
->rule
, rule
, p
, minordev
);
3095 if (rule
->af
== AF_INET
) {
3096 pool_put(&pf_rule_pl
, rule
);
3097 error
= EAFNOSUPPORT
;
3102 if (rule
->af
== AF_INET6
) {
3103 pool_put(&pf_rule_pl
, rule
);
3104 error
= EAFNOSUPPORT
;
3108 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].inactive
.ptr
,
3111 rule
->nr
= tail
->nr
+ 1;
3115 if ((error
= pf_rule_setup(pr
, rule
, ruleset
)))
3118 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].inactive
.ptr
,
3120 ruleset
->rules
[rs_num
].inactive
.rcount
++;
3121 if (rule
->rule_flag
& PFRULE_PFM
)
3126 case DIOCGETRULES
: {
3127 struct pf_ruleset
*ruleset
;
3128 struct pf_rule
*tail
;
3131 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3132 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3133 ruleset
= pf_find_ruleset(pr
->anchor
);
3134 if (ruleset
== NULL
) {
3138 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
3139 if (rs_num
>= PF_RULESET_MAX
) {
3143 tail
= TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
3146 pr
->nr
= tail
->nr
+ 1;
3149 pr
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
3154 struct pf_ruleset
*ruleset
;
3155 struct pf_rule
*rule
;
3158 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3159 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3160 ruleset
= pf_find_ruleset(pr
->anchor
);
3161 if (ruleset
== NULL
) {
3165 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
3166 if (rs_num
>= PF_RULESET_MAX
) {
3170 if (pr
->ticket
!= ruleset
->rules
[rs_num
].active
.ticket
) {
3174 rule
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
3175 while ((rule
!= NULL
) && (rule
->nr
!= pr
->nr
))
3176 rule
= TAILQ_NEXT(rule
, entries
);
3181 pf_rule_copyout(rule
, &pr
->rule
);
3182 if (pf_anchor_copyout(ruleset
, rule
, pr
)) {
3186 pfi_dynaddr_copyout(&pr
->rule
.src
.addr
);
3187 pfi_dynaddr_copyout(&pr
->rule
.dst
.addr
);
3188 pf_tbladdr_copyout(&pr
->rule
.src
.addr
);
3189 pf_tbladdr_copyout(&pr
->rule
.dst
.addr
);
3190 pf_rtlabel_copyout(&pr
->rule
.src
.addr
);
3191 pf_rtlabel_copyout(&pr
->rule
.dst
.addr
);
3192 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
3193 if (rule
->skip
[i
].ptr
== NULL
)
3194 pr
->rule
.skip
[i
].nr
= -1;
3196 pr
->rule
.skip
[i
].nr
=
3197 rule
->skip
[i
].ptr
->nr
;
3199 if (pr
->action
== PF_GET_CLR_CNTR
) {
3200 rule
->evaluations
= 0;
3201 rule
->packets
[0] = rule
->packets
[1] = 0;
3202 rule
->bytes
[0] = rule
->bytes
[1] = 0;
3207 case DIOCCHANGERULE
: {
3208 struct pfioc_rule
*pcr
= pr
;
3209 struct pf_ruleset
*ruleset
;
3210 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
3211 struct pf_pooladdr
*pa
;
3215 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
3216 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
3217 pcr
->pool_ticket
!= ticket_pabuf
) {
3222 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
3223 pcr
->action
> PF_CHANGE_GET_TICKET
) {
3227 pcr
->anchor
[sizeof (pcr
->anchor
) - 1] = '\0';
3228 pcr
->anchor_call
[sizeof (pcr
->anchor_call
) - 1] = '\0';
3229 ruleset
= pf_find_ruleset(pcr
->anchor
);
3230 if (ruleset
== NULL
) {
3234 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
3235 if (rs_num
>= PF_RULESET_MAX
) {
3240 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
3241 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
3245 ruleset
->rules
[rs_num
].active
.ticket
) {
3249 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3255 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
3256 newrule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
3257 if (newrule
== NULL
) {
3261 pf_rule_copyin(&pcr
->rule
, newrule
, p
, minordev
);
3263 if (newrule
->af
== AF_INET
) {
3264 pool_put(&pf_rule_pl
, newrule
);
3265 error
= EAFNOSUPPORT
;
3270 if (newrule
->af
== AF_INET6
) {
3271 pool_put(&pf_rule_pl
, newrule
);
3272 error
= EAFNOSUPPORT
;
3276 if (newrule
->ifname
[0]) {
3277 newrule
->kif
= pfi_kif_get(newrule
->ifname
);
3278 if (newrule
->kif
== NULL
) {
3279 pool_put(&pf_rule_pl
, newrule
);
3283 pfi_kif_ref(newrule
->kif
, PFI_KIF_REF_RULE
);
3285 newrule
->kif
= NULL
;
3289 if (altq_allowed
&& newrule
->qname
[0] != '\0') {
3291 pf_qname2qid(newrule
->qname
)) == 0)
3293 else if (newrule
->pqname
[0] != '\0') {
3294 if ((newrule
->pqid
=
3295 pf_qname2qid(newrule
->pqname
)) == 0)
3298 newrule
->pqid
= newrule
->qid
;
3300 #endif /* PF_ALTQ */
3301 if (newrule
->tagname
[0])
3303 pf_tagname2tag(newrule
->tagname
)) == 0)
3305 if (newrule
->match_tagname
[0])
3306 if ((newrule
->match_tag
= pf_tagname2tag(
3307 newrule
->match_tagname
)) == 0)
3309 if (newrule
->rt
&& !newrule
->direction
)
3314 if (newrule
->logif
>= PFLOGIFS_MAX
)
3317 if (pf_rtlabel_add(&newrule
->src
.addr
) ||
3318 pf_rtlabel_add(&newrule
->dst
.addr
))
3320 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
3322 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
3324 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
3326 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
3328 if (pf_anchor_setup(newrule
, ruleset
, pcr
->anchor_call
))
3330 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
3331 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
3334 if (newrule
->overload_tblname
[0]) {
3335 if ((newrule
->overload_tbl
= pfr_attach_table(
3336 ruleset
, newrule
->overload_tblname
)) ==
3340 newrule
->overload_tbl
->pfrkt_flags
|=
3344 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
3345 if (((((newrule
->action
== PF_NAT
) ||
3346 (newrule
->action
== PF_RDR
) ||
3347 (newrule
->action
== PF_BINAT
) ||
3348 (newrule
->rt
> PF_FASTROUTE
)) &&
3349 !newrule
->anchor
)) &&
3350 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
3354 pf_rm_rule(NULL
, newrule
);
3357 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
3358 newrule
->evaluations
= 0;
3359 newrule
->packets
[0] = newrule
->packets
[1] = 0;
3360 newrule
->bytes
[0] = newrule
->bytes
[1] = 0;
3362 pf_empty_pool(&pf_pabuf
);
3364 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
3365 oldrule
= TAILQ_FIRST(
3366 ruleset
->rules
[rs_num
].active
.ptr
);
3367 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
3368 oldrule
= TAILQ_LAST(
3369 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
3371 oldrule
= TAILQ_FIRST(
3372 ruleset
->rules
[rs_num
].active
.ptr
);
3373 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
3374 oldrule
= TAILQ_NEXT(oldrule
, entries
);
3375 if (oldrule
== NULL
) {
3376 if (newrule
!= NULL
)
3377 pf_rm_rule(NULL
, newrule
);
3383 if (pcr
->action
== PF_CHANGE_REMOVE
) {
3384 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
3385 ruleset
->rules
[rs_num
].active
.rcount
--;
3387 if (oldrule
== NULL
)
3389 ruleset
->rules
[rs_num
].active
.ptr
,
3391 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
3392 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
3393 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
3396 ruleset
->rules
[rs_num
].active
.ptr
,
3397 oldrule
, newrule
, entries
);
3398 ruleset
->rules
[rs_num
].active
.rcount
++;
3402 TAILQ_FOREACH(oldrule
,
3403 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
3406 ruleset
->rules
[rs_num
].active
.ticket
++;
3408 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
3409 pf_remove_if_empty_ruleset(ruleset
);
3414 case DIOCINSERTRULE
: {
3415 struct pf_ruleset
*ruleset
;
3416 struct pf_rule
*rule
, *tail
, *r
;
3420 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3421 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3422 is_anchor
= (pr
->anchor_call
[0] != '\0');
3424 if ((ruleset
= pf_find_ruleset_with_owner(pr
->anchor
,
3425 pr
->rule
.owner
, is_anchor
, &error
)) == NULL
)
3428 rs_num
= pf_get_ruleset_number(pr
->rule
.action
);
3429 if (rs_num
>= PF_RULESET_MAX
) {
3433 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3438 /* make sure this anchor rule doesn't exist already */
3440 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
3443 ((strcmp(r
->anchor
->name
,
3444 pr
->anchor_call
)) == 0)) {
3445 if (((strcmp(pr
->rule
.owner
,
3447 ((strcmp(r
->owner
, "")) == 0))
3453 r
= TAILQ_NEXT(r
, entries
);
3457 rule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
3462 pf_rule_copyin(&pr
->rule
, rule
, p
, minordev
);
3464 if (rule
->af
== AF_INET
) {
3465 pool_put(&pf_rule_pl
, rule
);
3466 error
= EAFNOSUPPORT
;
3471 if (rule
->af
== AF_INET6
) {
3472 pool_put(&pf_rule_pl
, rule
);
3473 error
= EAFNOSUPPORT
;
3478 r
= TAILQ_FIRST(ruleset
->rules
[rs_num
].active
.ptr
);
3479 while ((r
!= NULL
) && (rule
->priority
>= (unsigned)r
->priority
))
3480 r
= TAILQ_NEXT(r
, entries
);
3483 TAILQ_LAST(ruleset
->rules
[rs_num
].active
.ptr
,
3484 pf_rulequeue
)) != NULL
)
3485 rule
->nr
= tail
->nr
+ 1;
3492 if ((error
= pf_rule_setup(pr
, rule
, ruleset
)))
3495 if (rule
->anchor
!= NULL
)
3496 strncpy(rule
->anchor
->owner
, rule
->owner
,
3497 PF_OWNER_NAME_SIZE
);
3500 TAILQ_INSERT_BEFORE(r
, rule
, entries
);
3501 while (r
&& ++r
->nr
)
3502 r
= TAILQ_NEXT(r
, entries
);
3504 TAILQ_INSERT_TAIL(ruleset
->rules
[rs_num
].active
.ptr
,
3506 ruleset
->rules
[rs_num
].active
.rcount
++;
3508 /* Calculate checksum for the main ruleset */
3509 if (ruleset
== &pf_main_ruleset
)
3510 error
= pf_setup_pfsync_matching(ruleset
);
3512 pf_ruleset_cleanup(ruleset
, rs_num
);
3513 rule
->ticket
= ruleset
->rules
[rs_num
].active
.ticket
;
3515 pr
->rule
.ticket
= rule
->ticket
;
3516 pf_rule_copyout(rule
, &pr
->rule
);
3517 if (rule
->rule_flag
& PFRULE_PFM
)
3522 case DIOCDELETERULE
: {
3523 pr
->anchor
[sizeof (pr
->anchor
) - 1] = '\0';
3524 pr
->anchor_call
[sizeof (pr
->anchor_call
) - 1] = '\0';
3526 if (pr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
3531 if (pr
->rule
.ticket
) {
3532 if ((error
= pf_delete_rule_by_ticket(pr
)))
3535 pf_delete_rule_by_owner(pr
->rule
.owner
);
3549 pfioctl_ioc_state_kill(u_long cmd
, struct pfioc_state_kill
*psk
, struct proc
*p
)
3555 case DIOCCLRSTATES
: {
3556 struct pf_state
*s
, *nexts
;
3559 psk
->psk_ifname
[sizeof (psk
->psk_ifname
) - 1] = '\0';
3560 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
; s
= nexts
) {
3561 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
3563 if (!psk
->psk_ifname
[0] || strcmp(psk
->psk_ifname
,
3564 s
->kif
->pfik_name
) == 0) {
3566 /* don't send out individual delete messages */
3567 s
->sync_flags
= PFSTATE_NOSYNC
;
3573 psk
->psk_af
= killed
;
3575 pfsync_clear_states(pf_status
.hostid
, psk
->psk_ifname
);
3580 case DIOCKILLSTATES
: {
3581 struct pf_state
*s
, *nexts
;
3582 struct pf_state_key
*sk
;
3583 struct pf_state_host
*src
, *dst
;
3586 for (s
= RB_MIN(pf_state_tree_id
, &tree_id
); s
;
3588 nexts
= RB_NEXT(pf_state_tree_id
, &tree_id
, s
);
3591 if (sk
->direction
== PF_OUT
) {
3598 if ((!psk
->psk_af
|| sk
->af
== psk
->psk_af
) &&
3599 (!psk
->psk_proto
|| psk
->psk_proto
== sk
->proto
) &&
3600 PF_MATCHA(psk
->psk_src
.neg
,
3601 &psk
->psk_src
.addr
.v
.a
.addr
,
3602 &psk
->psk_src
.addr
.v
.a
.mask
,
3603 &src
->addr
, sk
->af
) &&
3604 PF_MATCHA(psk
->psk_dst
.neg
,
3605 &psk
->psk_dst
.addr
.v
.a
.addr
,
3606 &psk
->psk_dst
.addr
.v
.a
.mask
,
3607 &dst
->addr
, sk
->af
) &&
3608 (pf_match_xport(psk
->psk_proto
,
3609 psk
->psk_proto_variant
, &psk
->psk_src
.xport
,
3611 (pf_match_xport(psk
->psk_proto
,
3612 psk
->psk_proto_variant
, &psk
->psk_dst
.xport
,
3614 (!psk
->psk_ifname
[0] || strcmp(psk
->psk_ifname
,
3615 s
->kif
->pfik_name
) == 0)) {
3617 /* send immediate delete of state */
3618 pfsync_delete_state(s
);
3619 s
->sync_flags
|= PFSTATE_NOSYNC
;
3625 psk
->psk_af
= killed
;
3638 pfioctl_ioc_state(u_long cmd
, struct pfioc_state
*ps
, struct proc
*p
)
3644 case DIOCADDSTATE
: {
3645 struct pfsync_state
*sp
= &ps
->state
;
3647 struct pf_state_key
*sk
;
3648 struct pfi_kif
*kif
;
3650 if (sp
->timeout
>= PFTM_MAX
&&
3651 sp
->timeout
!= PFTM_UNTIL_PACKET
) {
3655 s
= pool_get(&pf_state_pl
, PR_WAITOK
);
3660 bzero(s
, sizeof (struct pf_state
));
3661 if ((sk
= pf_alloc_state_key(s
, NULL
)) == NULL
) {
3662 pool_put(&pf_state_pl
, s
);
3666 pf_state_import(sp
, sk
, s
);
3667 kif
= pfi_kif_get(sp
->ifname
);
3669 pool_put(&pf_state_pl
, s
);
3670 pool_put(&pf_state_key_pl
, sk
);
3674 TAILQ_INIT(&s
->unlink_hooks
);
3675 s
->state_key
->app_state
= 0;
3676 if (pf_insert_state(kif
, s
)) {
3677 pfi_kif_unref(kif
, PFI_KIF_REF_NONE
);
3678 pool_put(&pf_state_pl
, s
);
3682 pf_default_rule
.states
++;
3683 VERIFY(pf_default_rule
.states
!= 0);
3687 case DIOCGETSTATE
: {
3689 struct pf_state_cmp id_key
;
3691 bcopy(ps
->state
.id
, &id_key
.id
, sizeof (id_key
.id
));
3692 id_key
.creatorid
= ps
->state
.creatorid
;
3694 s
= pf_find_state_byid(&id_key
);
3700 pf_state_export(&ps
->state
, s
->state_key
, s
);
3713 pfioctl_ioc_states(u_long cmd
, struct pfioc_states_32
*ps32
,
3714 struct pfioc_states_64
*ps64
, struct proc
*p
)
3716 int p64
= proc_is64bit(p
);
3720 case DIOCGETSTATES
: { /* struct pfioc_states */
3721 struct pf_state
*state
;
3722 struct pfsync_state
*pstore
;
3727 len
= (p64
? ps64
->ps_len
: ps32
->ps_len
);
3729 size
= sizeof (struct pfsync_state
) * pf_status
.states
;
3731 ps64
->ps_len
= size
;
3733 ps32
->ps_len
= size
;
3737 pstore
= _MALLOC(sizeof (*pstore
), M_TEMP
, M_WAITOK
);
3738 if (pstore
== NULL
) {
3742 buf
= (p64
? ps64
->ps_buf
: ps32
->ps_buf
);
3744 state
= TAILQ_FIRST(&state_list
);
3746 if (state
->timeout
!= PFTM_UNLINKED
) {
3747 if ((nr
+ 1) * sizeof (*pstore
) > (unsigned)len
)
3750 pf_state_export(pstore
,
3751 state
->state_key
, state
);
3752 error
= copyout(pstore
, buf
, sizeof (*pstore
));
3754 _FREE(pstore
, M_TEMP
);
3757 buf
+= sizeof (*pstore
);
3760 state
= TAILQ_NEXT(state
, entry_list
);
3763 size
= sizeof (struct pfsync_state
) * nr
;
3765 ps64
->ps_len
= size
;
3767 ps32
->ps_len
= size
;
3769 _FREE(pstore
, M_TEMP
);
3782 pfioctl_ioc_natlook(u_long cmd
, struct pfioc_natlook
*pnl
, struct proc
*p
)
3789 struct pf_state_key
*sk
;
3790 struct pf_state
*state
;
3791 struct pf_state_key_cmp key
;
3792 int m
= 0, direction
= pnl
->direction
;
3795 key
.proto
= pnl
->proto
;
3796 key
.proto_variant
= pnl
->proto_variant
;
3799 PF_AZERO(&pnl
->saddr
, pnl
->af
) ||
3800 PF_AZERO(&pnl
->daddr
, pnl
->af
) ||
3801 ((pnl
->proto
== IPPROTO_TCP
||
3802 pnl
->proto
== IPPROTO_UDP
) &&
3803 (!pnl
->dxport
.port
|| !pnl
->sxport
.port
)))
3807 * userland gives us source and dest of connection,
3808 * reverse the lookup so we ask for what happens with
3809 * the return traffic, enabling us to find it in the
3812 if (direction
== PF_IN
) {
3813 PF_ACPY(&key
.ext
.addr
, &pnl
->daddr
, pnl
->af
);
3814 memcpy(&key
.ext
.xport
, &pnl
->dxport
,
3815 sizeof (key
.ext
.xport
));
3816 PF_ACPY(&key
.gwy
.addr
, &pnl
->saddr
, pnl
->af
);
3817 memcpy(&key
.gwy
.xport
, &pnl
->sxport
,
3818 sizeof (key
.gwy
.xport
));
3819 state
= pf_find_state_all(&key
, PF_IN
, &m
);
3821 PF_ACPY(&key
.lan
.addr
, &pnl
->daddr
, pnl
->af
);
3822 memcpy(&key
.lan
.xport
, &pnl
->dxport
,
3823 sizeof (key
.lan
.xport
));
3824 PF_ACPY(&key
.ext
.addr
, &pnl
->saddr
, pnl
->af
);
3825 memcpy(&key
.ext
.xport
, &pnl
->sxport
,
3826 sizeof (key
.ext
.xport
));
3827 state
= pf_find_state_all(&key
, PF_OUT
, &m
);
3830 error
= E2BIG
; /* more than one state */
3831 else if (state
!= NULL
) {
3832 sk
= state
->state_key
;
3833 if (direction
== PF_IN
) {
3834 PF_ACPY(&pnl
->rsaddr
, &sk
->lan
.addr
,
3836 memcpy(&pnl
->rsxport
, &sk
->lan
.xport
,
3837 sizeof (pnl
->rsxport
));
3838 PF_ACPY(&pnl
->rdaddr
, &pnl
->daddr
,
3840 memcpy(&pnl
->rdxport
, &pnl
->dxport
,
3841 sizeof (pnl
->rdxport
));
3843 PF_ACPY(&pnl
->rdaddr
, &sk
->gwy
.addr
,
3845 memcpy(&pnl
->rdxport
, &sk
->gwy
.xport
,
3846 sizeof (pnl
->rdxport
));
3847 PF_ACPY(&pnl
->rsaddr
, &pnl
->saddr
,
3849 memcpy(&pnl
->rsxport
, &pnl
->sxport
,
3850 sizeof (pnl
->rsxport
));
3867 pfioctl_ioc_tm(u_long cmd
, struct pfioc_tm
*pt
, struct proc
*p
)
3873 case DIOCSETTIMEOUT
: {
3876 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
||
3881 old
= pf_default_rule
.timeout
[pt
->timeout
];
3882 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
== 0)
3884 pf_default_rule
.timeout
[pt
->timeout
] = pt
->seconds
;
3885 if (pt
->timeout
== PFTM_INTERVAL
&& pt
->seconds
< old
)
3886 wakeup(pf_purge_thread_fn
);
3891 case DIOCGETTIMEOUT
: {
3892 if (pt
->timeout
< 0 || pt
->timeout
>= PFTM_MAX
) {
3896 pt
->seconds
= pf_default_rule
.timeout
[pt
->timeout
];
3909 pfioctl_ioc_limit(u_long cmd
, struct pfioc_limit
*pl
, struct proc
*p
)
3915 case DIOCGETLIMIT
: {
3917 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
) {
3921 pl
->limit
= pf_pool_limits
[pl
->index
].limit
;
3925 case DIOCSETLIMIT
: {
3928 if (pl
->index
< 0 || pl
->index
>= PF_LIMIT_MAX
||
3929 pf_pool_limits
[pl
->index
].pp
== NULL
) {
3933 pool_sethardlimit(pf_pool_limits
[pl
->index
].pp
,
3934 pl
->limit
, NULL
, 0);
3935 old_limit
= pf_pool_limits
[pl
->index
].limit
;
3936 pf_pool_limits
[pl
->index
].limit
= pl
->limit
;
3937 pl
->limit
= old_limit
;
3950 pfioctl_ioc_pooladdr(u_long cmd
, struct pfioc_pooladdr
*pp
, struct proc
*p
)
3953 struct pf_pooladdr
*pa
= NULL
;
3954 struct pf_pool
*pool
= NULL
;
3958 case DIOCBEGINADDRS
: {
3959 pf_empty_pool(&pf_pabuf
);
3960 pp
->ticket
= ++ticket_pabuf
;
3965 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
3966 if (pp
->ticket
!= ticket_pabuf
) {
3971 if (pp
->af
== AF_INET
) {
3972 error
= EAFNOSUPPORT
;
3977 if (pp
->af
== AF_INET6
) {
3978 error
= EAFNOSUPPORT
;
3982 if (pp
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
3983 pp
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
3984 pp
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
3988 pa
= pool_get(&pf_pooladdr_pl
, PR_WAITOK
);
3993 pf_pooladdr_copyin(&pp
->addr
, pa
);
3994 if (pa
->ifname
[0]) {
3995 pa
->kif
= pfi_kif_get(pa
->ifname
);
3996 if (pa
->kif
== NULL
) {
3997 pool_put(&pf_pooladdr_pl
, pa
);
4001 pfi_kif_ref(pa
->kif
, PFI_KIF_REF_RULE
);
4003 if (pfi_dynaddr_setup(&pa
->addr
, pp
->af
)) {
4004 pfi_dynaddr_remove(&pa
->addr
);
4005 pfi_kif_unref(pa
->kif
, PFI_KIF_REF_RULE
);
4006 pool_put(&pf_pooladdr_pl
, pa
);
4010 TAILQ_INSERT_TAIL(&pf_pabuf
, pa
, entries
);
4014 case DIOCGETADDRS
: {
4016 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
4017 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
4018 pp
->r_num
, 0, 1, 0);
4023 TAILQ_FOREACH(pa
, &pool
->list
, entries
)
4031 pp
->anchor
[sizeof (pp
->anchor
) - 1] = '\0';
4032 pool
= pf_get_pool(pp
->anchor
, pp
->ticket
, pp
->r_action
,
4033 pp
->r_num
, 0, 1, 1);
4038 pa
= TAILQ_FIRST(&pool
->list
);
4039 while ((pa
!= NULL
) && (nr
< pp
->nr
)) {
4040 pa
= TAILQ_NEXT(pa
, entries
);
4047 pf_pooladdr_copyout(pa
, &pp
->addr
);
4048 pfi_dynaddr_copyout(&pp
->addr
.addr
);
4049 pf_tbladdr_copyout(&pp
->addr
.addr
);
4050 pf_rtlabel_copyout(&pp
->addr
.addr
);
4054 case DIOCCHANGEADDR
: {
4055 struct pfioc_pooladdr
*pca
= pp
;
4056 struct pf_pooladdr
*oldpa
= NULL
, *newpa
= NULL
;
4057 struct pf_ruleset
*ruleset
;
4059 if (pca
->action
< PF_CHANGE_ADD_HEAD
||
4060 pca
->action
> PF_CHANGE_REMOVE
) {
4064 if (pca
->addr
.addr
.type
!= PF_ADDR_ADDRMASK
&&
4065 pca
->addr
.addr
.type
!= PF_ADDR_DYNIFTL
&&
4066 pca
->addr
.addr
.type
!= PF_ADDR_TABLE
) {
4071 pca
->anchor
[sizeof (pca
->anchor
) - 1] = '\0';
4072 ruleset
= pf_find_ruleset(pca
->anchor
);
4073 if (ruleset
== NULL
) {
4077 pool
= pf_get_pool(pca
->anchor
, pca
->ticket
, pca
->r_action
,
4078 pca
->r_num
, pca
->r_last
, 1, 1);
4083 if (pca
->action
!= PF_CHANGE_REMOVE
) {
4084 newpa
= pool_get(&pf_pooladdr_pl
, PR_WAITOK
);
4085 if (newpa
== NULL
) {
4089 pf_pooladdr_copyin(&pca
->addr
, newpa
);
4091 if (pca
->af
== AF_INET
) {
4092 pool_put(&pf_pooladdr_pl
, newpa
);
4093 error
= EAFNOSUPPORT
;
4098 if (pca
->af
== AF_INET6
) {
4099 pool_put(&pf_pooladdr_pl
, newpa
);
4100 error
= EAFNOSUPPORT
;
4104 if (newpa
->ifname
[0]) {
4105 newpa
->kif
= pfi_kif_get(newpa
->ifname
);
4106 if (newpa
->kif
== NULL
) {
4107 pool_put(&pf_pooladdr_pl
, newpa
);
4111 pfi_kif_ref(newpa
->kif
, PFI_KIF_REF_RULE
);
4114 if (pfi_dynaddr_setup(&newpa
->addr
, pca
->af
) ||
4115 pf_tbladdr_setup(ruleset
, &newpa
->addr
)) {
4116 pfi_dynaddr_remove(&newpa
->addr
);
4117 pfi_kif_unref(newpa
->kif
, PFI_KIF_REF_RULE
);
4118 pool_put(&pf_pooladdr_pl
, newpa
);
4124 if (pca
->action
== PF_CHANGE_ADD_HEAD
)
4125 oldpa
= TAILQ_FIRST(&pool
->list
);
4126 else if (pca
->action
== PF_CHANGE_ADD_TAIL
)
4127 oldpa
= TAILQ_LAST(&pool
->list
, pf_palist
);
4131 oldpa
= TAILQ_FIRST(&pool
->list
);
4132 while ((oldpa
!= NULL
) && (i
< (int)pca
->nr
)) {
4133 oldpa
= TAILQ_NEXT(oldpa
, entries
);
4136 if (oldpa
== NULL
) {
4142 if (pca
->action
== PF_CHANGE_REMOVE
) {
4143 TAILQ_REMOVE(&pool
->list
, oldpa
, entries
);
4144 pfi_dynaddr_remove(&oldpa
->addr
);
4145 pf_tbladdr_remove(&oldpa
->addr
);
4146 pfi_kif_unref(oldpa
->kif
, PFI_KIF_REF_RULE
);
4147 pool_put(&pf_pooladdr_pl
, oldpa
);
4150 TAILQ_INSERT_TAIL(&pool
->list
, newpa
, entries
);
4151 else if (pca
->action
== PF_CHANGE_ADD_HEAD
||
4152 pca
->action
== PF_CHANGE_ADD_BEFORE
)
4153 TAILQ_INSERT_BEFORE(oldpa
, newpa
, entries
);
4155 TAILQ_INSERT_AFTER(&pool
->list
, oldpa
,
4159 pool
->cur
= TAILQ_FIRST(&pool
->list
);
4160 PF_ACPY(&pool
->counter
, &pool
->cur
->addr
.v
.a
.addr
,
4174 pfioctl_ioc_ruleset(u_long cmd
, struct pfioc_ruleset
*pr
, struct proc
*p
)
4180 case DIOCGETRULESETS
: {
4181 struct pf_ruleset
*ruleset
;
4182 struct pf_anchor
*anchor
;
4184 pr
->path
[sizeof (pr
->path
) - 1] = '\0';
4185 pr
->name
[sizeof (pr
->name
) - 1] = '\0';
4186 if ((ruleset
= pf_find_ruleset(pr
->path
)) == NULL
) {
4191 if (ruleset
->anchor
== NULL
) {
4192 /* XXX kludge for pf_main_ruleset */
4193 RB_FOREACH(anchor
, pf_anchor_global
, &pf_anchors
)
4194 if (anchor
->parent
== NULL
)
4197 RB_FOREACH(anchor
, pf_anchor_node
,
4198 &ruleset
->anchor
->children
)
4204 case DIOCGETRULESET
: {
4205 struct pf_ruleset
*ruleset
;
4206 struct pf_anchor
*anchor
;
4209 pr
->path
[sizeof (pr
->path
) - 1] = '\0';
4210 if ((ruleset
= pf_find_ruleset(pr
->path
)) == NULL
) {
4215 if (ruleset
->anchor
== NULL
) {
4216 /* XXX kludge for pf_main_ruleset */
4217 RB_FOREACH(anchor
, pf_anchor_global
, &pf_anchors
)
4218 if (anchor
->parent
== NULL
&& nr
++ == pr
->nr
) {
4219 strlcpy(pr
->name
, anchor
->name
,
4224 RB_FOREACH(anchor
, pf_anchor_node
,
4225 &ruleset
->anchor
->children
)
4226 if (nr
++ == pr
->nr
) {
4227 strlcpy(pr
->name
, anchor
->name
,
4246 pfioctl_ioc_trans(u_long cmd
, struct pfioc_trans_32
*io32
,
4247 struct pfioc_trans_64
*io64
, struct proc
*p
)
4249 int p64
= proc_is64bit(p
);
4250 int error
= 0, esize
, size
;
4253 esize
= (p64
? io64
->esize
: io32
->esize
);
4254 size
= (p64
? io64
->size
: io32
->size
);
4255 buf
= (p64
? io64
->array
: io32
->array
);
4259 struct pfioc_trans_e
*ioe
;
4260 struct pfr_table
*table
;
4263 if (esize
!= sizeof (*ioe
)) {
4267 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
4268 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
4269 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4270 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4271 _FREE(table
, M_TEMP
);
4276 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4277 switch (ioe
->rs_num
) {
4278 case PF_RULESET_ALTQ
:
4281 if (ioe
->anchor
[0]) {
4282 _FREE(table
, M_TEMP
);
4287 error
= pf_begin_altq(&ioe
->ticket
);
4289 _FREE(table
, M_TEMP
);
4294 #endif /* PF_ALTQ */
4296 case PF_RULESET_TABLE
:
4297 bzero(table
, sizeof (*table
));
4298 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
4299 sizeof (table
->pfrt_anchor
));
4300 if ((error
= pfr_ina_begin(table
,
4301 &ioe
->ticket
, NULL
, 0))) {
4302 _FREE(table
, M_TEMP
);
4308 if ((error
= pf_begin_rules(&ioe
->ticket
,
4309 ioe
->rs_num
, ioe
->anchor
))) {
4310 _FREE(table
, M_TEMP
);
4316 if (copyout(ioe
, buf
, sizeof (*ioe
))) {
4317 _FREE(table
, M_TEMP
);
4323 _FREE(table
, M_TEMP
);
4328 case DIOCXROLLBACK
: {
4329 struct pfioc_trans_e
*ioe
;
4330 struct pfr_table
*table
;
4333 if (esize
!= sizeof (*ioe
)) {
4337 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
4338 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
4339 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4340 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4341 _FREE(table
, M_TEMP
);
4346 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4347 switch (ioe
->rs_num
) {
4348 case PF_RULESET_ALTQ
:
4351 if (ioe
->anchor
[0]) {
4352 _FREE(table
, M_TEMP
);
4357 error
= pf_rollback_altq(ioe
->ticket
);
4359 _FREE(table
, M_TEMP
);
4361 goto fail
; /* really bad */
4364 #endif /* PF_ALTQ */
4366 case PF_RULESET_TABLE
:
4367 bzero(table
, sizeof (*table
));
4368 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
4369 sizeof (table
->pfrt_anchor
));
4370 if ((error
= pfr_ina_rollback(table
,
4371 ioe
->ticket
, NULL
, 0))) {
4372 _FREE(table
, M_TEMP
);
4374 goto fail
; /* really bad */
4378 if ((error
= pf_rollback_rules(ioe
->ticket
,
4379 ioe
->rs_num
, ioe
->anchor
))) {
4380 _FREE(table
, M_TEMP
);
4382 goto fail
; /* really bad */
4387 _FREE(table
, M_TEMP
);
4393 struct pfioc_trans_e
*ioe
;
4394 struct pfr_table
*table
;
4395 struct pf_ruleset
*rs
;
4396 user_addr_t _buf
= buf
;
4399 if (esize
!= sizeof (*ioe
)) {
4403 ioe
= _MALLOC(sizeof (*ioe
), M_TEMP
, M_WAITOK
);
4404 table
= _MALLOC(sizeof (*table
), M_TEMP
, M_WAITOK
);
4405 /* first makes sure everything will succeed */
4406 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4407 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4408 _FREE(table
, M_TEMP
);
4413 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4414 switch (ioe
->rs_num
) {
4415 case PF_RULESET_ALTQ
:
4418 if (ioe
->anchor
[0]) {
4419 _FREE(table
, M_TEMP
);
4424 if (!altqs_inactive_open
||
4426 ticket_altqs_inactive
) {
4427 _FREE(table
, M_TEMP
);
4433 #endif /* PF_ALTQ */
4435 case PF_RULESET_TABLE
:
4436 rs
= pf_find_ruleset(ioe
->anchor
);
4437 if (rs
== NULL
|| !rs
->topen
|| ioe
->ticket
!=
4439 _FREE(table
, M_TEMP
);
4446 if (ioe
->rs_num
< 0 || ioe
->rs_num
>=
4448 _FREE(table
, M_TEMP
);
4453 rs
= pf_find_ruleset(ioe
->anchor
);
4455 !rs
->rules
[ioe
->rs_num
].inactive
.open
||
4456 rs
->rules
[ioe
->rs_num
].inactive
.ticket
!=
4458 _FREE(table
, M_TEMP
);
4467 /* now do the commit - no errors should happen here */
4468 for (i
= 0; i
< size
; i
++, buf
+= sizeof (*ioe
)) {
4469 if (copyin(buf
, ioe
, sizeof (*ioe
))) {
4470 _FREE(table
, M_TEMP
);
4475 ioe
->anchor
[sizeof (ioe
->anchor
) - 1] = '\0';
4476 switch (ioe
->rs_num
) {
4477 case PF_RULESET_ALTQ
:
4480 (error
= pf_commit_altq(ioe
->ticket
))) {
4481 _FREE(table
, M_TEMP
);
4483 goto fail
; /* really bad */
4485 #endif /* PF_ALTQ */
4487 case PF_RULESET_TABLE
:
4488 bzero(table
, sizeof (*table
));
4489 strlcpy(table
->pfrt_anchor
, ioe
->anchor
,
4490 sizeof (table
->pfrt_anchor
));
4491 if ((error
= pfr_ina_commit(table
, ioe
->ticket
,
4493 _FREE(table
, M_TEMP
);
4495 goto fail
; /* really bad */
4499 if ((error
= pf_commit_rules(ioe
->ticket
,
4500 ioe
->rs_num
, ioe
->anchor
))) {
4501 _FREE(table
, M_TEMP
);
4503 goto fail
; /* really bad */
4508 _FREE(table
, M_TEMP
);
4522 pfioctl_ioc_src_nodes(u_long cmd
, struct pfioc_src_nodes_32
*psn32
,
4523 struct pfioc_src_nodes_64
*psn64
, struct proc
*p
)
4525 int p64
= proc_is64bit(p
);
4529 case DIOCGETSRCNODES
: {
4530 struct pf_src_node
*n
, *pstore
;
4535 space
= (p64
? psn64
->psn_len
: psn32
->psn_len
);
4537 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
)
4540 size
= sizeof (struct pf_src_node
) * nr
;
4542 psn64
->psn_len
= size
;
4544 psn32
->psn_len
= size
;
4548 pstore
= _MALLOC(sizeof (*pstore
), M_TEMP
, M_WAITOK
);
4549 if (pstore
== NULL
) {
4553 buf
= (p64
? psn64
->psn_buf
: psn32
->psn_buf
);
4555 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) {
4556 uint64_t secs
= pf_time_second(), diff
;
4558 if ((nr
+ 1) * sizeof (*pstore
) > (unsigned)space
)
4561 bcopy(n
, pstore
, sizeof (*pstore
));
4562 if (n
->rule
.ptr
!= NULL
)
4563 pstore
->rule
.nr
= n
->rule
.ptr
->nr
;
4564 pstore
->creation
= secs
- pstore
->creation
;
4565 if (pstore
->expire
> secs
)
4566 pstore
->expire
-= secs
;
4570 /* adjust the connection rate estimate */
4571 diff
= secs
- n
->conn_rate
.last
;
4572 if (diff
>= n
->conn_rate
.seconds
)
4573 pstore
->conn_rate
.count
= 0;
4575 pstore
->conn_rate
.count
-=
4576 n
->conn_rate
.count
* diff
/
4577 n
->conn_rate
.seconds
;
4579 _RB_PARENT(pstore
, entry
) = NULL
;
4580 RB_LEFT(pstore
, entry
) = RB_RIGHT(pstore
, entry
) = NULL
;
4583 error
= copyout(pstore
, buf
, sizeof (*pstore
));
4585 _FREE(pstore
, M_TEMP
);
4588 buf
+= sizeof (*pstore
);
4592 size
= sizeof (struct pf_src_node
) * nr
;
4594 psn64
->psn_len
= size
;
4596 psn32
->psn_len
= size
;
4598 _FREE(pstore
, M_TEMP
);
4612 pfioctl_ioc_src_node_kill(u_long cmd
, struct pfioc_src_node_kill
*psnk
,
4619 case DIOCKILLSRCNODES
: {
4620 struct pf_src_node
*sn
;
4624 RB_FOREACH(sn
, pf_src_tree
, &tree_src_tracking
) {
4625 if (PF_MATCHA(psnk
->psnk_src
.neg
,
4626 &psnk
->psnk_src
.addr
.v
.a
.addr
,
4627 &psnk
->psnk_src
.addr
.v
.a
.mask
,
4628 &sn
->addr
, sn
->af
) &&
4629 PF_MATCHA(psnk
->psnk_dst
.neg
,
4630 &psnk
->psnk_dst
.addr
.v
.a
.addr
,
4631 &psnk
->psnk_dst
.addr
.v
.a
.mask
,
4632 &sn
->raddr
, sn
->af
)) {
4633 /* Handle state to src_node linkage */
4634 if (sn
->states
!= 0) {
4635 RB_FOREACH(s
, pf_state_tree_id
,
4637 if (s
->src_node
== sn
)
4639 if (s
->nat_src_node
== sn
)
4640 s
->nat_src_node
= NULL
;
4650 pf_purge_expired_src_nodes();
4652 psnk
->psnk_af
= killed
;
4665 pfioctl_ioc_iface(u_long cmd
, struct pfioc_iface_32
*io32
,
4666 struct pfioc_iface_64
*io64
, struct proc
*p
)
4668 int p64
= proc_is64bit(p
);
4672 case DIOCIGETIFACES
: {
4676 buf
= (p64
? io64
->pfiio_buffer
: io32
->pfiio_buffer
);
4677 esize
= (p64
? io64
->pfiio_esize
: io32
->pfiio_esize
);
4679 /* esize must be that of the user space version of pfi_kif */
4680 if (esize
!= sizeof (struct pfi_uif
)) {
4685 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4687 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4688 error
= pfi_get_ifaces(
4689 p64
? io64
->pfiio_name
: io32
->pfiio_name
, buf
,
4690 p64
? &io64
->pfiio_size
: &io32
->pfiio_size
);
4694 case DIOCSETIFFLAG
: {
4696 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4698 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4700 error
= pfi_set_flags(
4701 p64
? io64
->pfiio_name
: io32
->pfiio_name
,
4702 p64
? io64
->pfiio_flags
: io32
->pfiio_flags
);
4706 case DIOCCLRIFFLAG
: {
4708 io64
->pfiio_name
[sizeof (io64
->pfiio_name
) - 1] = '\0';
4710 io32
->pfiio_name
[sizeof (io32
->pfiio_name
) - 1] = '\0';
4712 error
= pfi_clear_flags(
4713 p64
? io64
->pfiio_name
: io32
->pfiio_name
,
4714 p64
? io64
->pfiio_flags
: io32
->pfiio_flags
);
4727 pf_af_hook(struct ifnet
*ifp
, struct mbuf
**mppn
, struct mbuf
**mp
,
4728 unsigned int af
, int input
, struct ip_fw_args
*fwa
)
4730 int error
= 0, reentry
;
4731 struct mbuf
*nextpkt
;
4733 reentry
= net_thread_check_lock(NET_THREAD_HELD_PF
);
4735 lck_rw_lock_shared(pf_perim_lock
);
4739 lck_mtx_lock(pf_lock
);
4740 net_thread_set_lock(NET_THREAD_HELD_PF
);
4743 if (mppn
!= NULL
&& *mppn
!= NULL
)
4744 VERIFY(*mppn
== *mp
);
4745 if ((nextpkt
= (*mp
)->m_nextpkt
) != NULL
)
4746 (*mp
)->m_nextpkt
= NULL
;
4751 error
= pf_inet_hook(ifp
, mp
, input
, fwa
);
4757 error
= pf_inet6_hook(ifp
, mp
, input
, fwa
);
4764 /* When packet valid, link to the next packet */
4765 if (*mp
!= NULL
&& nextpkt
!= NULL
) {
4766 struct mbuf
*m
= *mp
;
4767 while (m
->m_nextpkt
!= NULL
)
4769 m
->m_nextpkt
= nextpkt
;
4771 /* Fix up linkage of previous packet in the chain */
4779 net_thread_unset_lock(NET_THREAD_HELD_PF
);
4780 lck_mtx_unlock(pf_lock
);
4784 lck_rw_done(pf_perim_lock
);
4792 pf_inet_hook(struct ifnet
*ifp
, struct mbuf
**mp
, int input
,
4793 struct ip_fw_args
*fwa
)
4795 struct mbuf
*m
= *mp
;
4796 #if BYTE_ORDER != BIG_ENDIAN
4797 struct ip
*ip
= mtod(m
, struct ip
*);
4802 * If the packet is outbound, is originated locally, is flagged for
4803 * delayed UDP/TCP checksum calculation, and is about to be processed
4804 * for an interface that doesn't support the appropriate checksum
4805 * offloading, then calculated the checksum here so that PF can adjust
4808 if (!input
&& m
->m_pkthdr
.rcvif
== NULL
) {
4809 static const int mask
= CSUM_DELAY_DATA
;
4810 const int flags
= m
->m_pkthdr
.csum_flags
&
4811 ~IF_HWASSIST_CSUM_FLAGS(ifp
->if_hwassist
);
4814 in_delayed_cksum(m
);
4815 m
->m_pkthdr
.csum_flags
&= ~mask
;
4819 #if BYTE_ORDER != BIG_ENDIAN
4823 if (pf_test(input
? PF_IN
: PF_OUT
, ifp
, mp
, NULL
, fwa
) != PF_PASS
) {
4827 error
= EHOSTUNREACH
;
4832 #if BYTE_ORDER != BIG_ENDIAN
4835 ip
= mtod(*mp
, struct ip
*);
4847 pf_inet6_hook(struct ifnet
*ifp
, struct mbuf
**mp
, int input
,
4848 struct ip_fw_args
*fwa
)
4853 * If the packet is outbound, is originated locally, is flagged for
4854 * delayed UDP/TCP checksum calculation, and is about to be processed
4855 * for an interface that doesn't support the appropriate checksum
4856 * offloading, then calculated the checksum here so that PF can adjust
4859 if (!input
&& (*mp
)->m_pkthdr
.rcvif
== NULL
) {
4860 static const int mask
= CSUM_DELAY_IPV6_DATA
;
4861 const int flags
= (*mp
)->m_pkthdr
.csum_flags
&
4862 ~IF_HWASSIST_CSUM_FLAGS(ifp
->if_hwassist
);
4865 in6_delayed_cksum(*mp
, sizeof(struct ip6_hdr
));
4866 (*mp
)->m_pkthdr
.csum_flags
&= ~mask
;
4870 if (pf_test6(input
? PF_IN
: PF_OUT
, ifp
, mp
, NULL
, fwa
) != PF_PASS
) {
4874 error
= EHOSTUNREACH
;
4884 pf_ifaddr_hook(struct ifnet
*ifp
, unsigned long cmd
)
4886 lck_rw_lock_shared(pf_perim_lock
);
4887 lck_mtx_lock(pf_lock
);
4894 case SIOCAIFADDR_IN6_32
:
4895 case SIOCAIFADDR_IN6_64
:
4896 case SIOCDIFADDR_IN6
:
4898 if (ifp
->if_pf_kif
!= NULL
)
4899 pfi_kifaddr_update(ifp
->if_pf_kif
);
4902 panic("%s: unexpected ioctl %lu", __func__
, cmd
);
4906 lck_mtx_unlock(pf_lock
);
4907 lck_rw_done(pf_perim_lock
);
4912 * Caller acquires dlil lock as writer (exclusive)
4915 pf_ifnet_hook(struct ifnet
*ifp
, int attach
)
4917 lck_rw_lock_shared(pf_perim_lock
);
4918 lck_mtx_lock(pf_lock
);
4920 pfi_attach_ifnet(ifp
);
4922 pfi_detach_ifnet(ifp
);
4923 lck_mtx_unlock(pf_lock
);
4924 lck_rw_done(pf_perim_lock
);
4928 pf_attach_hooks(void)
4930 ifnet_head_lock_shared();
4932 * Check against ifnet_addrs[] before proceeding, in case this
4933 * is called very early on, e.g. during dlil_init() before any
4934 * network interface is attached.
4936 if (ifnet_addrs
!= NULL
) {
4939 for (i
= 0; i
<= if_index
; i
++) {
4940 struct ifnet
*ifp
= ifindex2ifnet
[i
];
4942 pfi_attach_ifnet(ifp
);
4950 /* currently unused along with pfdetach() */
4952 pf_detach_hooks(void)
4954 ifnet_head_lock_shared();
4955 if (ifnet_addrs
!= NULL
) {
4956 for (i
= 0; i
<= if_index
; i
++) {
4959 struct ifnet
*ifp
= ifindex2ifnet
[i
];
4960 if (ifp
!= NULL
&& ifp
->if_pf_kif
!= NULL
) {
4961 pfi_detach_ifnet(ifp
);