/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return EINVAL;		\
	} while (0)

#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
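/*
 * COPYIN/COPYOUT above pick the transfer primitive from the call context:
 * requests tagged PFR_FLAG_USERIOCTL come from a userland ioctl and go
 * through copyin()/copyout(), while kernel-internal callers hand in flat
 * kernel pointers that are simply bcopy()'d (the trailing ", 0" keeps the
 * expression's error-code shape).
 */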
#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof (sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof (sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af) == AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
#define ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke)	\
	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
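/*
 * NO_ADDRESSES marks a shadow table that was defined without an address
 * list, ENQUEUE_UNMARKED_ONLY restricts pfr_enqueue_addrs() to entries not
 * marked by a preceding pfr_mark_addrs() pass, and INVERT_NEG_FLAG asks
 * pfr_clstats_kentries() to flip the negation bit while clearing stats.
 */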
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		user_addr_t		 pfrw1_addr;
		user_addr_t		 pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int pfrw_free;
	int pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e) do { rv = (e); goto _bad; } while (0)
struct pool pfr_ktable_pl;
struct pool pfr_kentry_pl;

static struct pool pfr_kentry_pl2;
static struct sockaddr_in pfr_sin;
static struct sockaddr_in6 pfr_sin6;
static union sockaddr_union pfr_mask;
static struct pf_addr pfr_ffaddr;
static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, boolean_t);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static struct pfr_ktablehead pfr_ktables;
static struct pfr_table pfr_nulltable;
static int pfr_ktable_cnt;
void
pfr_initialize(void)
{
    pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
        "pfrktable", NULL);
    pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
        "pfrkentry", NULL);
    pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
        "pfrkentry2", NULL);

    pfr_sin.sin_len = sizeof (pfr_sin);
    pfr_sin.sin_family = AF_INET;
    pfr_sin6.sin6_len = sizeof (pfr_sin6);
    pfr_sin6.sin6_family = AF_INET6;

    memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
}

void
pfr_destroy(void)
{
    pool_destroy(&pfr_ktable_pl);
    pool_destroy(&pfr_kentry_pl);
    pool_destroy(&pfr_kentry_pl2);
}
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentryworkq workq;

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    pfr_enqueue_addrs(kt, &workq, ndel, 0);

    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_remove_kentries(kt, &workq);
        if (kt->pfrkt_cnt) {
            printf("pfr_clr_addrs: corruption detected (%d).\n",
                kt->pfrkt_cnt);
            kt->pfrkt_cnt = 0;
        }
    }
    return 0;
}
270 pfr_add_addrs(struct pfr_table
*tbl
, user_addr_t _addr
, int size
,
271 int *nadd
, int flags
)
273 struct pfr_ktable
*kt
, *tmpkt
;
274 struct pfr_kentryworkq workq
;
275 struct pfr_kentry
*p
, *q
;
278 user_addr_t addr
= _addr
;
279 u_int64_t tzero
= pf_calendar_time_second();
281 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
283 if (pfr_validate_table(tbl
, 0, flags
& PFR_FLAG_USERIOCTL
)) {
286 kt
= pfr_lookup_table(tbl
);
287 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
290 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
) {
293 tmpkt
= pfr_create_ktable(&pfr_nulltable
, 0, 0);
298 for (i
= 0; i
< size
; i
++, addr
+= sizeof(ad
)) {
299 if (COPYIN(addr
, &ad
, sizeof(ad
), flags
)) {
302 if (pfr_validate_addr(&ad
)) {
305 p
= pfr_lookup_addr(kt
, &ad
, 1);
306 q
= pfr_lookup_addr(tmpkt
, &ad
, 1);
307 if (flags
& PFR_FLAG_FEEDBACK
) {
309 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
310 } else if (p
== NULL
) {
311 ad
.pfra_fback
= PFR_FB_ADDED
;
312 } else if (p
->pfrke_not
!= ad
.pfra_not
) {
313 ad
.pfra_fback
= PFR_FB_CONFLICT
;
315 ad
.pfra_fback
= PFR_FB_NONE
;
318 if (p
== NULL
&& q
== NULL
) {
319 p
= pfr_create_kentry(&ad
,
320 !(flags
& PFR_FLAG_USERIOCTL
));
324 if (pfr_route_kentry(tmpkt
, p
)) {
325 pfr_destroy_kentry(p
);
326 ad
.pfra_fback
= PFR_FB_NONE
;
328 SLIST_INSERT_HEAD(&workq
, p
, pfrke_workq
);
332 if (flags
& PFR_FLAG_FEEDBACK
) {
333 if (COPYOUT(&ad
, addr
, sizeof(ad
), flags
)) {
338 pfr_clean_node_mask(tmpkt
, &workq
);
339 if (!(flags
& PFR_FLAG_DUMMY
)) {
340 pfr_insert_kentries(kt
, &workq
, tzero
);
342 pfr_destroy_kentries(&workq
);
347 pfr_destroy_ktable(tmpkt
, 0);
350 pfr_clean_node_mask(tmpkt
, &workq
);
351 pfr_destroy_kentries(&workq
);
352 if (flags
& PFR_FLAG_FEEDBACK
) {
353 pfr_reset_feedback(_addr
, size
, flags
);
355 pfr_destroy_ktable(tmpkt
, 0);
int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentryworkq workq;
    struct pfr_kentry *p;
    struct pfr_addr ad;
    user_addr_t addr = _addr;
    int i, rv, xdel = 0, log = 1;

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_FEEDBACK);
    if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    /*
     * there are two algorithms to choose from here.
     * with:
     *   n: number of addresses to delete
     *   N: number of addresses in the table
     *
     * one is O(N) and is better for large 'n'
     * one is O(n*LOG(N)) and is better for small 'n'
     *
     * the following code tries to decide which one is best.
     */
    for (i = kt->pfrkt_cnt; i > 0; i >>= 1) {
        log++;
    }
    if (size > kt->pfrkt_cnt / log) {
        /* full table scan */
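        /*
         * Here log is roughly log2(N) + 1, so this branch is taken when
         * n > N/log2(N), i.e. when n individual radix lookups (n*log2(N)
         * work) would cost more than one O(N) mark-and-sweep pass over
         * the whole table.
         */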
        pfr_mark_addrs(kt);
    } else {
        /* iterate over addresses to delete */
        for (i = 0; i < size; i++, addr += sizeof (ad)) {
            if (COPYIN(addr, &ad, sizeof (ad), flags)) {
                return EFAULT;
            }
            if (pfr_validate_addr(&ad)) {
                return EINVAL;
            }
            p = pfr_lookup_addr(kt, &ad, 1);
            if (p != NULL) {
                p->pfrke_mark = 0;
            }
        }
    }
    SLIST_INIT(&workq);
    for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
        if (COPYIN(addr, &ad, sizeof (ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        p = pfr_lookup_addr(kt, &ad, 1);
        if (flags & PFR_FLAG_FEEDBACK) {
            if (p == NULL) {
                ad.pfra_fback = PFR_FB_NONE;
            } else if (p->pfrke_not != ad.pfra_not) {
                ad.pfra_fback = PFR_FB_CONFLICT;
            } else if (p->pfrke_mark) {
                ad.pfra_fback = PFR_FB_DUPLICATE;
            } else {
                ad.pfra_fback = PFR_FB_DELETED;
            }
        }
        if (p != NULL && p->pfrke_not == ad.pfra_not &&
            !p->pfrke_mark) {
            p->pfrke_mark = 1;
            SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
            xdel++;
        }
        if (flags & PFR_FLAG_FEEDBACK) {
            if (COPYOUT(&ad, addr, sizeof (ad), flags)) {
                senderr(EFAULT);
            }
        }
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_remove_kentries(kt, &workq);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
_bad:
    if (flags & PFR_FLAG_FEEDBACK) {
        pfr_reset_feedback(_addr, size, flags);
    }
    return rv;
}
461 pfr_set_addrs(struct pfr_table
*tbl
, user_addr_t _addr
, int size
,
462 int *size2
, int *nadd
, int *ndel
, int *nchange
, int flags
,
463 u_int32_t ignore_pfrt_flags
)
465 struct pfr_ktable
*kt
, *tmpkt
;
466 struct pfr_kentryworkq addq
, delq
, changeq
;
467 struct pfr_kentry
*p
, *q
;
469 user_addr_t addr
= _addr
;
470 int i
, rv
, xadd
= 0, xdel
= 0, xchange
= 0;
471 u_int64_t tzero
= pf_calendar_time_second();
473 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
475 if (pfr_validate_table(tbl
, ignore_pfrt_flags
, flags
&
476 PFR_FLAG_USERIOCTL
)) {
479 kt
= pfr_lookup_table(tbl
);
480 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
483 if (kt
->pfrkt_flags
& PFR_TFLAG_CONST
) {
486 tmpkt
= pfr_create_ktable(&pfr_nulltable
, 0, 0);
493 SLIST_INIT(&changeq
);
494 for (i
= 0; i
< size
; i
++, addr
+= sizeof(ad
)) {
495 if (COPYIN(addr
, &ad
, sizeof(ad
), flags
)) {
498 if (pfr_validate_addr(&ad
)) {
501 ad
.pfra_fback
= PFR_FB_NONE
;
502 p
= pfr_lookup_addr(kt
, &ad
, 1);
505 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
509 if (p
->pfrke_not
!= ad
.pfra_not
) {
510 SLIST_INSERT_HEAD(&changeq
, p
, pfrke_workq
);
511 ad
.pfra_fback
= PFR_FB_CHANGED
;
515 q
= pfr_lookup_addr(tmpkt
, &ad
, 1);
517 ad
.pfra_fback
= PFR_FB_DUPLICATE
;
520 p
= pfr_create_kentry(&ad
,
521 !(flags
& PFR_FLAG_USERIOCTL
));
525 if (pfr_route_kentry(tmpkt
, p
)) {
526 pfr_destroy_kentry(p
);
527 ad
.pfra_fback
= PFR_FB_NONE
;
529 SLIST_INSERT_HEAD(&addq
, p
, pfrke_workq
);
530 ad
.pfra_fback
= PFR_FB_ADDED
;
535 if (flags
& PFR_FLAG_FEEDBACK
) {
536 if (COPYOUT(&ad
, addr
, sizeof(ad
), flags
)) {
541 pfr_enqueue_addrs(kt
, &delq
, &xdel
, ENQUEUE_UNMARKED_ONLY
);
542 if ((flags
& PFR_FLAG_FEEDBACK
) && *size2
) {
543 if (*size2
< size
+ xdel
) {
544 *size2
= size
+ xdel
;
549 SLIST_FOREACH(p
, &delq
, pfrke_workq
) {
550 pfr_copyout_addr(&ad
, p
);
551 ad
.pfra_fback
= PFR_FB_DELETED
;
552 if (COPYOUT(&ad
, addr
, sizeof(ad
), flags
)) {
559 pfr_clean_node_mask(tmpkt
, &addq
);
560 if (!(flags
& PFR_FLAG_DUMMY
)) {
561 pfr_insert_kentries(kt
, &addq
, tzero
);
562 pfr_remove_kentries(kt
, &delq
);
563 pfr_clstats_kentries(&changeq
, tzero
, INVERT_NEG_FLAG
);
565 pfr_destroy_kentries(&addq
);
573 if (nchange
!= NULL
) {
576 if ((flags
& PFR_FLAG_FEEDBACK
) && size2
) {
577 *size2
= size
+ xdel
;
579 pfr_destroy_ktable(tmpkt
, 0);
582 pfr_clean_node_mask(tmpkt
, &addq
);
583 pfr_destroy_kentries(&addq
);
584 if (flags
& PFR_FLAG_FEEDBACK
) {
585 pfr_reset_feedback(_addr
, size
, flags
);
587 pfr_destroy_ktable(tmpkt
, 0);
592 pfr_tst_addrs(struct pfr_table
*tbl
, user_addr_t addr
, int size
,
593 int *nmatch
, int flags
)
595 struct pfr_ktable
*kt
;
596 struct pfr_kentry
*p
;
600 ACCEPT_FLAGS(flags
, PFR_FLAG_REPLACE
);
601 if (pfr_validate_table(tbl
, 0, 0)) {
604 kt
= pfr_lookup_table(tbl
);
605 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
609 for (i
= 0; i
< size
; i
++, addr
+= sizeof(ad
)) {
610 if (COPYIN(addr
, &ad
, sizeof(ad
), flags
)) {
613 if (pfr_validate_addr(&ad
)) {
616 if (ADDR_NETWORK(&ad
)) {
619 p
= pfr_lookup_addr(kt
, &ad
, 0);
620 if (flags
& PFR_FLAG_REPLACE
) {
621 pfr_copyout_addr(&ad
, p
);
623 ad
.pfra_fback
= (p
== NULL
) ? PFR_FB_NONE
:
624 (p
->pfrke_not
? PFR_FB_NOTMATCH
: PFR_FB_MATCH
);
625 if (p
!= NULL
&& !p
->pfrke_not
) {
628 if (COPYOUT(&ad
, addr
, sizeof(ad
), flags
)) {
632 if (nmatch
!= NULL
) {
639 pfr_get_addrs(struct pfr_table
*tbl
, user_addr_t addr
, int *size
,
642 struct pfr_ktable
*kt
;
643 struct pfr_walktree w
;
646 ACCEPT_FLAGS(flags
, 0);
647 if (pfr_validate_table(tbl
, 0, 0)) {
650 kt
= pfr_lookup_table(tbl
);
651 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
654 if (kt
->pfrkt_cnt
> *size
) {
655 *size
= kt
->pfrkt_cnt
;
659 bzero(&w
, sizeof(w
));
660 w
.pfrw_op
= PFRW_GET_ADDRS
;
662 w
.pfrw_free
= kt
->pfrkt_cnt
;
663 w
.pfrw_flags
= flags
;
664 rv
= kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
666 rv
= kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
,
674 printf("pfr_get_addrs: corruption detected (%d).\n",
678 *size
= kt
->pfrkt_cnt
;
683 pfr_get_astats(struct pfr_table
*tbl
, user_addr_t addr
, int *size
,
686 struct pfr_ktable
*kt
;
687 struct pfr_walktree w
;
688 struct pfr_kentryworkq workq
;
690 u_int64_t tzero
= pf_calendar_time_second();
692 /* XXX PFR_FLAG_CLSTATS disabled */
693 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
);
694 if (pfr_validate_table(tbl
, 0, 0)) {
697 kt
= pfr_lookup_table(tbl
);
698 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
701 if (kt
->pfrkt_cnt
> *size
) {
702 *size
= kt
->pfrkt_cnt
;
706 bzero(&w
, sizeof(w
));
707 w
.pfrw_op
= PFRW_GET_ASTATS
;
708 w
.pfrw_astats
= addr
;
709 w
.pfrw_free
= kt
->pfrkt_cnt
;
710 w
.pfrw_flags
= flags
;
711 rv
= kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
, pfr_walktree
, &w
);
713 rv
= kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
,
716 if (!rv
&& (flags
& PFR_FLAG_CLSTATS
)) {
717 pfr_enqueue_addrs(kt
, &workq
, NULL
, 0);
718 pfr_clstats_kentries(&workq
, tzero
, 0);
725 printf("pfr_get_astats: corruption detected (%d).\n",
729 *size
= kt
->pfrkt_cnt
;
734 pfr_clr_astats(struct pfr_table
*tbl
, user_addr_t _addr
, int size
,
735 int *nzero
, int flags
)
737 struct pfr_ktable
*kt
;
738 struct pfr_kentryworkq workq
;
739 struct pfr_kentry
*p
;
741 user_addr_t addr
= _addr
;
742 int i
, rv
, xzero
= 0;
744 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
746 if (pfr_validate_table(tbl
, 0, 0)) {
749 kt
= pfr_lookup_table(tbl
);
750 if (kt
== NULL
|| !(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
754 for (i
= 0; i
< size
; i
++, addr
+= sizeof(ad
)) {
755 if (COPYIN(addr
, &ad
, sizeof(ad
), flags
)) {
758 if (pfr_validate_addr(&ad
)) {
761 p
= pfr_lookup_addr(kt
, &ad
, 1);
762 if (flags
& PFR_FLAG_FEEDBACK
) {
763 ad
.pfra_fback
= (p
!= NULL
) ?
764 PFR_FB_CLEARED
: PFR_FB_NONE
;
765 if (COPYOUT(&ad
, addr
, sizeof(ad
), flags
)) {
770 SLIST_INSERT_HEAD(&workq
, p
, pfrke_workq
);
775 if (!(flags
& PFR_FLAG_DUMMY
)) {
776 pfr_clstats_kentries(&workq
, 0, 0);
783 if (flags
& PFR_FLAG_FEEDBACK
) {
784 pfr_reset_feedback(_addr
, size
, flags
);
static int
pfr_validate_addr(struct pfr_addr *ad)
{
    int i;

    switch (ad->pfra_af) {
    case AF_INET:
        if (ad->pfra_net > 32) {
            return -1;
        }
        break;
    case AF_INET6:
        if (ad->pfra_net > 128) {
            return -1;
        }
        break;
    default:
        return -1;
    }
    if (ad->pfra_net < 128 &&
        (((caddr_t)ad)[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8)))) {
        return -1;
    }
    for (i = (ad->pfra_net + 7) / 8; i < (int)sizeof (ad->pfra_u); i++) {
        if (((caddr_t)ad)[i]) {
            return -1;
        }
    }
    if (ad->pfra_not && ad->pfra_not != 1) {
        return -1;
    }
    if (ad->pfra_fback) {
        return -1;
    }
    return 0;
}
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
    struct pfr_walktree w;

    SLIST_INIT(workq);
    bzero(&w, sizeof (w));
    w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
    w.pfrw_workq = workq;
    if (kt->pfrkt_ip4 != NULL) {
        if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
            pfr_walktree, &w)) {
            printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
        }
    }
    if (kt->pfrkt_ip6 != NULL) {
        if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w)) {
            printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
        }
    }
    if (naddr != NULL) {
        *naddr = w.pfrw_cnt;
    }
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
    struct pfr_walktree w;

    bzero(&w, sizeof (w));
    w.pfrw_op = PFRW_MARK;
    if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) {
        printf("pfr_mark_addrs: IPv4 walktree failed.\n");
    }
    if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) {
        printf("pfr_mark_addrs: IPv6 walktree failed.\n");
    }
}
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
    union sockaddr_union sa, mask;
    struct radix_node_head *head;
    struct pfr_kentry *ke;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&sa, sizeof (sa));
    if (ad->pfra_af == AF_INET) {
        FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
        head = kt->pfrkt_ip4;
    } else if (ad->pfra_af == AF_INET6) {
        FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
        head = kt->pfrkt_ip6;
    }
    if (ADDR_NETWORK(ad)) {
        pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
        ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
    } else {
        ke = (struct pfr_kentry *)rn_match(&sa, head);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
        if (exact && ke && KENTRY_NETWORK(ke)) {
            ke = NULL;
        }
    }
    return ke;
}
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, boolean_t intr)
{
    struct pfr_kentry *ke;

    if (intr) {
        ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
    } else {
        ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
    }
    if (ke == NULL) {
        return NULL;
    }
    bzero(ke, sizeof (*ke));

    if (ad->pfra_af == AF_INET) {
        FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
    } else if (ad->pfra_af == AF_INET6) {
        FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
    }
    ke->pfrke_af = ad->pfra_af;
    ke->pfrke_net = ad->pfra_net;
    ke->pfrke_not = ad->pfra_not;
    ke->pfrke_intrpool = (u_int8_t)intr;

    return ke;
}
static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
    struct pfr_kentry *p, *q;

    for (p = SLIST_FIRST(workq); p != NULL; p = q) {
        q = SLIST_NEXT(p, pfrke_workq);
        pfr_destroy_kentry(p);
    }
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
    if (ke->pfrke_intrpool) {
        pool_put(&pfr_kentry_pl2, ke);
    } else {
        pool_put(&pfr_kentry_pl, ke);
    }
}
957 pfr_insert_kentries(struct pfr_ktable
*kt
,
958 struct pfr_kentryworkq
*workq
, u_int64_t tzero
)
960 struct pfr_kentry
*p
;
963 SLIST_FOREACH(p
, workq
, pfrke_workq
) {
964 rv
= pfr_route_kentry(kt
, p
);
966 printf("pfr_insert_kentries: cannot route entry "
970 p
->pfrke_tzero
= tzero
;
977 pfr_insert_kentry(struct pfr_ktable
*kt
, struct pfr_addr
*ad
, u_int64_t tzero
)
979 struct pfr_kentry
*p
;
982 p
= pfr_lookup_addr(kt
, ad
, 1);
986 p
= pfr_create_kentry(ad
, TRUE
);
991 rv
= pfr_route_kentry(kt
, p
);
996 p
->pfrke_tzero
= tzero
;
1003 pfr_remove_kentries(struct pfr_ktable
*kt
,
1004 struct pfr_kentryworkq
*workq
)
1006 struct pfr_kentry
*p
;
1009 SLIST_FOREACH(p
, workq
, pfrke_workq
) {
1010 pfr_unroute_kentry(kt
, p
);
1014 pfr_destroy_kentries(workq
);
static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
    struct pfr_kentry *p;

    SLIST_FOREACH(p, workq, pfrke_workq)
        pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
    struct pfr_kentry *p;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    SLIST_FOREACH(p, workq, pfrke_workq) {
        if (negchange) {
            p->pfrke_not = !p->pfrke_not;
        }
        bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
        bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
        p->pfrke_tzero = tzero;
    }
}

static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
    struct pfr_addr ad;
    int i;

    for (i = 0; i < size; i++, addr += sizeof (ad)) {
        if (COPYIN(addr, &ad, sizeof (ad), flags)) {
            break;
        }
        ad.pfra_fback = PFR_FB_NONE;
        if (COPYOUT(&ad, addr, sizeof (ad), flags)) {
            break;
        }
    }
}
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
    int i;

    bzero(sa, sizeof (*sa));
    if (af == AF_INET) {
        sa->sin.sin_len = sizeof (sa->sin);
        sa->sin.sin_family = AF_INET;
        sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0;
    } else if (af == AF_INET6) {
        sa->sin6.sin6_len = sizeof (sa->sin6);
        sa->sin6.sin6_family = AF_INET6;
        for (i = 0; i < 4; i++) {
            if (net <= 32) {
                sa->sin6.sin6_addr.s6_addr32[i] =
                    net ? htonl(-1 << (32 - net)) : 0;
                break;
            }
            sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
            net -= 32;
        }
    }
}
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
    union sockaddr_union mask;
    struct radix_node *rn;
    struct radix_node_head *head;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
    if (ke->pfrke_af == AF_INET) {
        head = kt->pfrkt_ip4;
    } else if (ke->pfrke_af == AF_INET6) {
        head = kt->pfrkt_ip6;
    } else {
        return -1;
    }

    if (KENTRY_NETWORK(ke)) {
        pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
        rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
    } else {
        rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
    }

    return rn == NULL ? -1 : 0;
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
    union sockaddr_union mask;
    struct radix_node *rn;
    struct radix_node_head *head;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (ke->pfrke_af == AF_INET) {
        head = kt->pfrkt_ip4;
    } else if (ke->pfrke_af == AF_INET6) {
        head = kt->pfrkt_ip6;
    } else {
        return -1;
    }

    if (KENTRY_NETWORK(ke)) {
        pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
        rn = rn_delete(&ke->pfrke_sa, &mask, head);
    } else {
        rn = rn_delete(&ke->pfrke_sa, NULL, head);
    }

    if (rn == NULL) {
        printf("pfr_unroute_kentry: delete failed.\n");
        return -1;
    }
    return 0;
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
    bzero(ad, sizeof (*ad));
    if (ke == NULL) {
        return;
    }
    ad->pfra_af = ke->pfrke_af;
    ad->pfra_net = ke->pfrke_net;
    ad->pfra_not = ke->pfrke_not;
    if (ad->pfra_af == AF_INET) {
        ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
    } else if (ad->pfra_af == AF_INET6) {
        ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
    }
}
1164 pfr_walktree(struct radix_node
*rn
, void *arg
)
1166 struct pfr_kentry
*ke
= (struct pfr_kentry
*)rn
;
1167 struct pfr_walktree
*w
= arg
;
1168 int flags
= w
->pfrw_flags
;
1170 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1172 switch (w
->pfrw_op
) {
1177 if (ke
->pfrke_mark
) {
1182 SLIST_INSERT_HEAD(w
->pfrw_workq
, ke
, pfrke_workq
);
1185 case PFRW_GET_ADDRS
:
1186 if (w
->pfrw_free
-- > 0) {
1189 pfr_copyout_addr(&ad
, ke
);
1190 if (copyout(&ad
, w
->pfrw_addr
, sizeof(ad
))) {
1193 w
->pfrw_addr
+= sizeof(ad
);
1196 case PFRW_GET_ASTATS
:
1197 if (w
->pfrw_free
-- > 0) {
1198 struct pfr_astats as
;
1200 bzero(&as
, sizeof(as
));
1202 pfr_copyout_addr(&as
.pfras_a
, ke
);
1204 bcopy(ke
->pfrke_packets
, as
.pfras_packets
,
1205 sizeof(as
.pfras_packets
));
1206 bcopy(ke
->pfrke_bytes
, as
.pfras_bytes
,
1207 sizeof(as
.pfras_bytes
));
1208 as
.pfras_tzero
= ke
->pfrke_tzero
;
1210 if (COPYOUT(&as
, w
->pfrw_astats
, sizeof(as
), flags
)) {
1213 w
->pfrw_astats
+= sizeof(as
);
1217 if (ke
->pfrke_not
) {
1218 break; /* negative entries are ignored */
1220 if (!w
->pfrw_cnt
--) {
1221 w
->pfrw_kentry
= ke
;
1222 return 1; /* finish search */
1225 case PFRW_DYNADDR_UPDATE
:
1226 if (ke
->pfrke_af
== AF_INET
) {
1227 if (w
->pfrw_dyn
->pfid_acnt4
++ > 0) {
1230 pfr_prepare_network(&pfr_mask
, AF_INET
, ke
->pfrke_net
);
1231 w
->pfrw_dyn
->pfid_addr4
= *SUNION2PF(
1232 &ke
->pfrke_sa
, AF_INET
);
1233 w
->pfrw_dyn
->pfid_mask4
= *SUNION2PF(
1234 &pfr_mask
, AF_INET
);
1235 } else if (ke
->pfrke_af
== AF_INET6
) {
1236 if (w
->pfrw_dyn
->pfid_acnt6
++ > 0) {
1239 pfr_prepare_network(&pfr_mask
, AF_INET6
, ke
->pfrke_net
);
1240 w
->pfrw_dyn
->pfid_addr6
= *SUNION2PF(
1241 &ke
->pfrke_sa
, AF_INET6
);
1242 w
->pfrw_dyn
->pfid_mask6
= *SUNION2PF(
1243 &pfr_mask
, AF_INET6
);
1251 pfr_clr_tables(struct pfr_table
*filter
, int *ndel
, int flags
)
1253 struct pfr_ktableworkq workq
;
1254 struct pfr_ktable
*p
;
1257 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1259 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
1261 if (pfr_fix_anchor(filter
->pfrt_anchor
)) {
1264 if (pfr_table_count(filter
, flags
) < 0) {
1269 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1270 if (pfr_skip_table(filter
, p
, flags
)) {
1273 if (strcmp(p
->pfrkt_anchor
, PF_RESERVED_ANCHOR
) == 0) {
1276 if (!(p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1279 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_ACTIVE
;
1280 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1283 if (!(flags
& PFR_FLAG_DUMMY
)) {
1284 pfr_setflags_ktables(&workq
);
1293 pfr_add_tables(user_addr_t tbl
, int size
, int *nadd
, int flags
)
1295 struct pfr_ktableworkq addq
, changeq
;
1296 struct pfr_ktable
*p
, *q
, *r
, key
;
1297 int i
, rv
, xadd
= 0;
1298 u_int64_t tzero
= pf_calendar_time_second();
1300 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1302 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1304 SLIST_INIT(&changeq
);
1305 for (i
= 0; i
< size
; i
++, tbl
+= sizeof(key
.pfrkt_t
)) {
1306 if (COPYIN(tbl
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
)) {
1309 pfr_table_copyin_cleanup(&key
.pfrkt_t
);
1310 if (pfr_validate_table(&key
.pfrkt_t
, PFR_TFLAG_USRMASK
,
1311 flags
& PFR_FLAG_USERIOCTL
)) {
1314 key
.pfrkt_flags
|= PFR_TFLAG_ACTIVE
;
1315 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1317 p
= pfr_create_ktable(&key
.pfrkt_t
, tzero
, 1);
1321 SLIST_FOREACH(q
, &addq
, pfrkt_workq
) {
1322 if (!pfr_ktable_compare(p
, q
)) {
1323 pfr_destroy_ktable(p
, 0);
1327 SLIST_INSERT_HEAD(&addq
, p
, pfrkt_workq
);
1329 if (!key
.pfrkt_anchor
[0]) {
1333 /* find or create root table */
1334 bzero(key
.pfrkt_anchor
, sizeof(key
.pfrkt_anchor
));
1335 r
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1340 SLIST_FOREACH(q
, &addq
, pfrkt_workq
) {
1341 if (!pfr_ktable_compare(&key
, q
)) {
1346 key
.pfrkt_flags
= 0;
1347 r
= pfr_create_ktable(&key
.pfrkt_t
, 0, 1);
1351 SLIST_INSERT_HEAD(&addq
, r
, pfrkt_workq
);
1353 } else if (!(p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1354 SLIST_FOREACH(q
, &changeq
, pfrkt_workq
)
1355 if (!pfr_ktable_compare(&key
, q
)) {
1358 p
->pfrkt_nflags
= (p
->pfrkt_flags
&
1359 ~PFR_TFLAG_USRMASK
) | key
.pfrkt_flags
;
1360 SLIST_INSERT_HEAD(&changeq
, p
, pfrkt_workq
);
1366 if (!(flags
& PFR_FLAG_DUMMY
)) {
1367 pfr_insert_ktables(&addq
);
1368 pfr_setflags_ktables(&changeq
);
1370 pfr_destroy_ktables(&addq
, 0);
1377 pfr_destroy_ktables(&addq
, 0);
1382 pfr_del_tables(user_addr_t tbl
, int size
, int *ndel
, int flags
)
1384 struct pfr_ktableworkq workq
;
1385 struct pfr_ktable
*p
, *q
, key
;
1388 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1390 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1392 for (i
= 0; i
< size
; i
++, tbl
+= sizeof(key
.pfrkt_t
)) {
1393 if (COPYIN(tbl
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
)) {
1396 pfr_table_copyin_cleanup(&key
.pfrkt_t
);
1397 if (pfr_validate_table(&key
.pfrkt_t
, 0,
1398 flags
& PFR_FLAG_USERIOCTL
)) {
1401 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1402 if (p
!= NULL
&& (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1403 SLIST_FOREACH(q
, &workq
, pfrkt_workq
)
1404 if (!pfr_ktable_compare(p
, q
)) {
1407 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_ACTIVE
;
1408 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1415 if (!(flags
& PFR_FLAG_DUMMY
)) {
1416 pfr_setflags_ktables(&workq
);
1425 pfr_get_tables(struct pfr_table
*filter
, user_addr_t tbl
, int *size
,
1428 struct pfr_ktable
*p
;
1431 ACCEPT_FLAGS(flags
, PFR_FLAG_ALLRSETS
);
1432 if (pfr_fix_anchor(filter
->pfrt_anchor
)) {
1435 n
= nn
= pfr_table_count(filter
, flags
);
1443 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1444 if (pfr_skip_table(filter
, p
, flags
)) {
1450 if (COPYOUT(&p
->pfrkt_t
, tbl
, sizeof(p
->pfrkt_t
), flags
)) {
1453 tbl
+= sizeof(p
->pfrkt_t
);
1456 printf("pfr_get_tables: corruption detected (%d).\n", n
);
1464 pfr_get_tstats(struct pfr_table
*filter
, user_addr_t tbl
, int *size
,
1467 struct pfr_ktable
*p
;
1468 struct pfr_ktableworkq workq
;
1470 u_int64_t tzero
= pf_calendar_time_second();
1472 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1474 /* XXX PFR_FLAG_CLSTATS disabled */
1475 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_ALLRSETS
);
1476 if (pfr_fix_anchor(filter
->pfrt_anchor
)) {
1479 n
= nn
= pfr_table_count(filter
, flags
);
1488 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1489 if (pfr_skip_table(filter
, p
, flags
)) {
1495 if (COPYOUT(&p
->pfrkt_ts
, tbl
, sizeof(p
->pfrkt_ts
), flags
)) {
1498 tbl
+= sizeof(p
->pfrkt_ts
);
1499 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1501 if (flags
& PFR_FLAG_CLSTATS
) {
1502 pfr_clstats_ktables(&workq
, tzero
,
1503 flags
& PFR_FLAG_ADDRSTOO
);
1506 printf("pfr_get_tstats: corruption detected (%d).\n", n
);
1514 pfr_clr_tstats(user_addr_t tbl
, int size
, int *nzero
, int flags
)
1516 struct pfr_ktableworkq workq
;
1517 struct pfr_ktable
*p
, key
;
1519 u_int64_t tzero
= pf_calendar_time_second();
1521 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1523 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
|
1526 for (i
= 0; i
< size
; i
++, tbl
+= sizeof(key
.pfrkt_t
)) {
1527 if (COPYIN(tbl
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
)) {
1530 pfr_table_copyin_cleanup(&key
.pfrkt_t
);
1531 if (pfr_validate_table(&key
.pfrkt_t
, 0, 0)) {
1534 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1536 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1540 if (!(flags
& PFR_FLAG_DUMMY
)) {
1541 pfr_clstats_ktables(&workq
, tzero
, flags
& PFR_FLAG_ADDRSTOO
);
1543 if (nzero
!= NULL
) {
1550 pfr_set_tflags(user_addr_t tbl
, int size
, int setflag
, int clrflag
,
1551 int *nchange
, int *ndel
, int flags
)
1553 struct pfr_ktableworkq workq
;
1554 struct pfr_ktable
*p
, *q
, key
;
1555 int i
, xchange
= 0, xdel
= 0;
1557 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1559 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1560 if ((setflag
& ~PFR_TFLAG_USRMASK
) ||
1561 (clrflag
& ~PFR_TFLAG_USRMASK
) ||
1562 (setflag
& clrflag
)) {
1566 for (i
= 0; i
< size
; i
++, tbl
+= sizeof(key
.pfrkt_t
)) {
1567 if (COPYIN(tbl
, &key
.pfrkt_t
, sizeof(key
.pfrkt_t
), flags
)) {
1570 pfr_table_copyin_cleanup(&key
.pfrkt_t
);
1571 if (pfr_validate_table(&key
.pfrkt_t
, 0,
1572 flags
& PFR_FLAG_USERIOCTL
)) {
1575 p
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1576 if (p
!= NULL
&& (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1577 p
->pfrkt_nflags
= (p
->pfrkt_flags
| setflag
) &
1579 if (p
->pfrkt_nflags
== p
->pfrkt_flags
) {
1582 SLIST_FOREACH(q
, &workq
, pfrkt_workq
)
1583 if (!pfr_ktable_compare(p
, q
)) {
1586 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1587 if ((p
->pfrkt_flags
& PFR_TFLAG_PERSIST
) &&
1588 (clrflag
& PFR_TFLAG_PERSIST
) &&
1589 !(p
->pfrkt_flags
& PFR_TFLAG_REFERENCED
)) {
1598 if (!(flags
& PFR_FLAG_DUMMY
)) {
1599 pfr_setflags_ktables(&workq
);
1601 if (nchange
!= NULL
) {
1611 pfr_ina_begin(struct pfr_table
*trs
, u_int32_t
*ticket
, int *ndel
, int flags
)
1613 struct pfr_ktableworkq workq
;
1614 struct pfr_ktable
*p
;
1615 struct pf_ruleset
*rs
;
1618 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1620 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
);
1621 rs
= pf_find_or_create_ruleset(trs
->pfrt_anchor
);
1626 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1627 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1628 pfr_skip_table(trs
, p
, 0)) {
1631 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_INACTIVE
;
1632 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1635 if (!(flags
& PFR_FLAG_DUMMY
)) {
1636 pfr_setflags_ktables(&workq
);
1637 if (ticket
!= NULL
) {
1638 *ticket
= ++rs
->tticket
;
1642 pf_remove_if_empty_ruleset(rs
);
1651 pfr_ina_define(struct pfr_table
*tbl
, user_addr_t addr
, int size
,
1652 int *nadd
, int *naddr
, u_int32_t ticket
, int flags
)
1654 struct pfr_ktableworkq tableq
;
1655 struct pfr_kentryworkq addrq
;
1656 struct pfr_ktable
*kt
, *rt
, *shadow
, key
;
1657 struct pfr_kentry
*p
;
1659 struct pf_ruleset
*rs
;
1660 int i
, rv
, xadd
= 0, xaddr
= 0;
1662 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1664 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
| PFR_FLAG_ADDRSTOO
);
1665 if (size
&& !(flags
& PFR_FLAG_ADDRSTOO
)) {
1668 if (pfr_validate_table(tbl
, PFR_TFLAG_USRMASK
,
1669 flags
& PFR_FLAG_USERIOCTL
)) {
1672 rs
= pf_find_ruleset(tbl
->pfrt_anchor
);
1673 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
) {
1676 tbl
->pfrt_flags
|= PFR_TFLAG_INACTIVE
;
1677 SLIST_INIT(&tableq
);
1678 kt
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, (struct pfr_ktable
*)(void *)tbl
);
1680 kt
= pfr_create_ktable(tbl
, 0, 1);
1684 SLIST_INSERT_HEAD(&tableq
, kt
, pfrkt_workq
);
1686 if (!tbl
->pfrt_anchor
[0]) {
1690 /* find or create root table */
1691 bzero(&key
, sizeof(key
));
1692 strlcpy(key
.pfrkt_name
, tbl
->pfrt_name
,
1693 sizeof(key
.pfrkt_name
));
1694 rt
= RB_FIND(pfr_ktablehead
, &pfr_ktables
, &key
);
1696 kt
->pfrkt_root
= rt
;
1699 rt
= pfr_create_ktable(&key
.pfrkt_t
, 0, 1);
1701 pfr_destroy_ktables(&tableq
, 0);
1704 SLIST_INSERT_HEAD(&tableq
, rt
, pfrkt_workq
);
1705 kt
->pfrkt_root
= rt
;
1706 } else if (!(kt
->pfrkt_flags
& PFR_TFLAG_INACTIVE
)) {
1710 shadow
= pfr_create_ktable(tbl
, 0, 0);
1711 if (shadow
== NULL
) {
1712 pfr_destroy_ktables(&tableq
, 0);
1716 for (i
= 0; i
< size
; i
++, addr
+= sizeof(ad
)) {
1717 if (COPYIN(addr
, &ad
, sizeof(ad
), flags
)) {
1720 if (pfr_validate_addr(&ad
)) {
1723 if (pfr_lookup_addr(shadow
, &ad
, 1) != NULL
) {
1726 p
= pfr_create_kentry(&ad
, FALSE
);
1730 if (pfr_route_kentry(shadow
, p
)) {
1731 pfr_destroy_kentry(p
);
1734 SLIST_INSERT_HEAD(&addrq
, p
, pfrke_workq
);
1737 if (!(flags
& PFR_FLAG_DUMMY
)) {
1738 if (kt
->pfrkt_shadow
!= NULL
) {
1739 pfr_destroy_ktable(kt
->pfrkt_shadow
, 1);
1741 kt
->pfrkt_flags
|= PFR_TFLAG_INACTIVE
;
1742 pfr_insert_ktables(&tableq
);
1743 shadow
->pfrkt_cnt
= (flags
& PFR_FLAG_ADDRSTOO
) ?
1744 xaddr
: NO_ADDRESSES
;
1745 kt
->pfrkt_shadow
= shadow
;
1747 pfr_clean_node_mask(shadow
, &addrq
);
1748 pfr_destroy_ktable(shadow
, 0);
1749 pfr_destroy_ktables(&tableq
, 0);
1750 pfr_destroy_kentries(&addrq
);
1755 if (naddr
!= NULL
) {
1760 pfr_destroy_ktable(shadow
, 0);
1761 pfr_destroy_ktables(&tableq
, 0);
1762 pfr_destroy_kentries(&addrq
);
1767 pfr_ina_rollback(struct pfr_table
*trs
, u_int32_t ticket
, int *ndel
, int flags
)
1769 struct pfr_ktableworkq workq
;
1770 struct pfr_ktable
*p
;
1771 struct pf_ruleset
*rs
;
1774 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1776 ACCEPT_FLAGS(flags
, PFR_FLAG_DUMMY
);
1777 rs
= pf_find_ruleset(trs
->pfrt_anchor
);
1778 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
) {
1782 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1783 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1784 pfr_skip_table(trs
, p
, 0)) {
1787 p
->pfrkt_nflags
= p
->pfrkt_flags
& ~PFR_TFLAG_INACTIVE
;
1788 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1791 if (!(flags
& PFR_FLAG_DUMMY
)) {
1792 pfr_setflags_ktables(&workq
);
1794 pf_remove_if_empty_ruleset(rs
);
1803 pfr_ina_commit(struct pfr_table
*trs
, u_int32_t ticket
, int *nadd
,
1804 int *nchange
, int flags
)
1806 struct pfr_ktable
*p
, *q
;
1807 struct pfr_ktableworkq workq
;
1808 struct pf_ruleset
*rs
;
1809 int xadd
= 0, xchange
= 0;
1810 u_int64_t tzero
= pf_calendar_time_second();
1812 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1814 ACCEPT_FLAGS(flags
, PFR_FLAG_ATOMIC
| PFR_FLAG_DUMMY
);
1815 rs
= pf_find_ruleset(trs
->pfrt_anchor
);
1816 if (rs
== NULL
|| !rs
->topen
|| ticket
!= rs
->tticket
) {
1821 RB_FOREACH(p
, pfr_ktablehead
, &pfr_ktables
) {
1822 if (!(p
->pfrkt_flags
& PFR_TFLAG_INACTIVE
) ||
1823 pfr_skip_table(trs
, p
, 0)) {
1826 SLIST_INSERT_HEAD(&workq
, p
, pfrkt_workq
);
1827 if (p
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) {
1834 if (!(flags
& PFR_FLAG_DUMMY
)) {
1835 for (p
= SLIST_FIRST(&workq
); p
!= NULL
; p
= q
) {
1836 q
= SLIST_NEXT(p
, pfrkt_workq
);
1837 pfr_commit_ktable(p
, tzero
);
1840 pf_remove_if_empty_ruleset(rs
);
1845 if (nchange
!= NULL
) {
1853 pfr_commit_ktable(struct pfr_ktable
*kt
, u_int64_t tzero
)
1855 struct pfr_ktable
*shadow
= kt
->pfrkt_shadow
;
1858 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1860 if (shadow
->pfrkt_cnt
== NO_ADDRESSES
) {
1861 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
1862 pfr_clstats_ktable(kt
, tzero
, 1);
1864 } else if (kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) {
1865 /* kt might contain addresses */
1866 struct pfr_kentryworkq addrq
, addq
, changeq
, delq
, garbageq
;
1867 struct pfr_kentry
*p
, *q
, *next
;
1870 pfr_enqueue_addrs(shadow
, &addrq
, NULL
, 0);
1873 SLIST_INIT(&changeq
);
1875 SLIST_INIT(&garbageq
);
1876 pfr_clean_node_mask(shadow
, &addrq
);
1877 for (p
= SLIST_FIRST(&addrq
); p
!= NULL
; p
= next
) {
1878 next
= SLIST_NEXT(p
, pfrke_workq
); /* XXX */
1879 pfr_copyout_addr(&ad
, p
);
1880 q
= pfr_lookup_addr(kt
, &ad
, 1);
1882 if (q
->pfrke_not
!= p
->pfrke_not
) {
1883 SLIST_INSERT_HEAD(&changeq
, q
,
1887 SLIST_INSERT_HEAD(&garbageq
, p
, pfrke_workq
);
1889 p
->pfrke_tzero
= tzero
;
1890 SLIST_INSERT_HEAD(&addq
, p
, pfrke_workq
);
1893 pfr_enqueue_addrs(kt
, &delq
, NULL
, ENQUEUE_UNMARKED_ONLY
);
1894 pfr_insert_kentries(kt
, &addq
, tzero
);
1895 pfr_remove_kentries(kt
, &delq
);
1896 pfr_clstats_kentries(&changeq
, tzero
, INVERT_NEG_FLAG
);
1897 pfr_destroy_kentries(&garbageq
);
1899 /* kt cannot contain addresses */
1900 SWAP(struct radix_node_head
*, kt
->pfrkt_ip4
,
1902 SWAP(struct radix_node_head
*, kt
->pfrkt_ip6
,
1904 SWAP(int, kt
->pfrkt_cnt
, shadow
->pfrkt_cnt
);
1905 pfr_clstats_ktable(kt
, tzero
, 1);
1907 nflags
= ((shadow
->pfrkt_flags
& PFR_TFLAG_USRMASK
) |
1908 (kt
->pfrkt_flags
& PFR_TFLAG_SETMASK
) | PFR_TFLAG_ACTIVE
) &
1909 ~PFR_TFLAG_INACTIVE
;
1910 pfr_destroy_ktable(shadow
, 0);
1911 kt
->pfrkt_shadow
= NULL
;
1912 pfr_setflags_ktable(kt
, nflags
);
void
pfr_table_copyin_cleanup(struct pfr_table *tbl)
{
    tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
    tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
    int i;

    if (!tbl->pfrt_name[0]) {
        return -1;
    }
    if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) {
        return -1;
    }
    if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) {
        return -1;
    }
    for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) {
        if (tbl->pfrt_name[i]) {
            return -1;
        }
    }
    if (pfr_fix_anchor(tbl->pfrt_anchor)) {
        return -1;
    }
    if (tbl->pfrt_flags & ~allowedflags) {
        return -1;
    }
    return 0;
}
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
    size_t siz = MAXPATHLEN;
    int i;

    if (anchor[0] == '/') {
        char *path;
        int off;

        path = anchor;
        off = 1;
        while (*++path == '/') {
            off++;
        }
        bcopy(path, anchor, siz - off);
        memset(anchor + siz - off, 0, off);
    }
    if (anchor[siz - 1]) {
        return -1;
    }
    for (i = strlen(anchor); i < siz; i++) {
        if (anchor[i]) {
            return -1;
        }
    }
    return 0;
}
static int
pfr_table_count(struct pfr_table *filter, int flags)
{
    struct pf_ruleset *rs;

    if (flags & PFR_FLAG_ALLRSETS) {
        return pfr_ktable_cnt;
    }
    if (filter->pfrt_anchor[0]) {
        rs = pf_find_ruleset(filter->pfrt_anchor);
        return (rs != NULL) ? rs->tables : -1;
    }
    return pf_main_ruleset.tables;
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
    if (flags & PFR_FLAG_ALLRSETS) {
        return 0;
    }
    if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) {
        return 1;
    }
    return 0;
}
2011 pfr_insert_ktables(struct pfr_ktableworkq
*workq
)
2013 struct pfr_ktable
*p
;
2015 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2017 SLIST_FOREACH(p
, workq
, pfrkt_workq
)
2018 pfr_insert_ktable(p
);
2022 pfr_insert_ktable(struct pfr_ktable
*kt
)
2024 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2026 RB_INSERT(pfr_ktablehead
, &pfr_ktables
, kt
);
2028 if (kt
->pfrkt_root
!= NULL
) {
2029 if (!kt
->pfrkt_root
->pfrkt_refcnt
[PFR_REFCNT_ANCHOR
]++) {
2030 pfr_setflags_ktable(kt
->pfrkt_root
,
2031 kt
->pfrkt_root
->pfrkt_flags
| PFR_TFLAG_REFDANCHOR
);
2037 pfr_setflags_ktables(struct pfr_ktableworkq
*workq
)
2039 struct pfr_ktable
*p
, *q
;
2041 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2043 for (p
= SLIST_FIRST(workq
); p
; p
= q
) {
2044 q
= SLIST_NEXT(p
, pfrkt_workq
);
2045 pfr_setflags_ktable(p
, p
->pfrkt_nflags
);
2050 pfr_setflags_ktable(struct pfr_ktable
*kt
, int newf
)
2052 struct pfr_kentryworkq addrq
;
2054 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2056 if (!(newf
& PFR_TFLAG_REFERENCED
) &&
2057 !(newf
& PFR_TFLAG_REFDANCHOR
) &&
2058 !(newf
& PFR_TFLAG_PERSIST
)) {
2059 newf
&= ~PFR_TFLAG_ACTIVE
;
2061 if (!(newf
& PFR_TFLAG_ACTIVE
)) {
2062 newf
&= ~PFR_TFLAG_USRMASK
;
2064 if (!(newf
& PFR_TFLAG_SETMASK
)) {
2065 RB_REMOVE(pfr_ktablehead
, &pfr_ktables
, kt
);
2066 if (kt
->pfrkt_root
!= NULL
) {
2067 if (!--kt
->pfrkt_root
->pfrkt_refcnt
[PFR_REFCNT_ANCHOR
]) {
2068 pfr_setflags_ktable(kt
->pfrkt_root
,
2069 kt
->pfrkt_root
->pfrkt_flags
&
2070 ~PFR_TFLAG_REFDANCHOR
);
2073 pfr_destroy_ktable(kt
, 1);
2077 if (!(newf
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_cnt
) {
2078 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
2079 pfr_remove_kentries(kt
, &addrq
);
2081 if (!(newf
& PFR_TFLAG_INACTIVE
) && kt
->pfrkt_shadow
!= NULL
) {
2082 pfr_destroy_ktable(kt
->pfrkt_shadow
, 1);
2083 kt
->pfrkt_shadow
= NULL
;
2085 kt
->pfrkt_flags
= newf
;
2089 pfr_clstats_ktables(struct pfr_ktableworkq
*workq
, u_int64_t tzero
, int recurse
)
2091 struct pfr_ktable
*p
;
2093 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2095 SLIST_FOREACH(p
, workq
, pfrkt_workq
)
2096 pfr_clstats_ktable(p
, tzero
, recurse
);
2100 pfr_clstats_ktable(struct pfr_ktable
*kt
, u_int64_t tzero
, int recurse
)
2102 struct pfr_kentryworkq addrq
;
2104 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2107 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
2108 pfr_clstats_kentries(&addrq
, tzero
, 0);
2110 bzero(kt
->pfrkt_packets
, sizeof(kt
->pfrkt_packets
));
2111 bzero(kt
->pfrkt_bytes
, sizeof(kt
->pfrkt_bytes
));
2112 kt
->pfrkt_match
= kt
->pfrkt_nomatch
= 0;
2113 kt
->pfrkt_tzero
= tzero
;
2116 static struct pfr_ktable
*
2117 pfr_create_ktable(struct pfr_table
*tbl
, u_int64_t tzero
, int attachruleset
)
2119 struct pfr_ktable
*kt
;
2120 struct pf_ruleset
*rs
;
2122 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2124 kt
= pool_get(&pfr_ktable_pl
, PR_WAITOK
);
2128 bzero(kt
, sizeof(*kt
));
2131 if (attachruleset
) {
2132 rs
= pf_find_or_create_ruleset(tbl
->pfrt_anchor
);
2134 pfr_destroy_ktable(kt
, 0);
2141 if (!rn_inithead((void **)&kt
->pfrkt_ip4
,
2142 offsetof(struct sockaddr_in
, sin_addr
) * 8) ||
2143 !rn_inithead((void **)&kt
->pfrkt_ip6
,
2144 offsetof(struct sockaddr_in6
, sin6_addr
) * 8)) {
2145 pfr_destroy_ktable(kt
, 0);
2148 kt
->pfrkt_tzero
= tzero
;
2154 pfr_destroy_ktables(struct pfr_ktableworkq
*workq
, int flushaddr
)
2156 struct pfr_ktable
*p
, *q
;
2158 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2160 for (p
= SLIST_FIRST(workq
); p
; p
= q
) {
2161 q
= SLIST_NEXT(p
, pfrkt_workq
);
2162 pfr_destroy_ktable(p
, flushaddr
);
2167 pfr_destroy_ktable(struct pfr_ktable
*kt
, int flushaddr
)
2169 struct pfr_kentryworkq addrq
;
2171 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2174 pfr_enqueue_addrs(kt
, &addrq
, NULL
, 0);
2175 pfr_clean_node_mask(kt
, &addrq
);
2176 pfr_destroy_kentries(&addrq
);
2178 if (kt
->pfrkt_ip4
!= NULL
) {
2179 _FREE((caddr_t
)kt
->pfrkt_ip4
, M_RTABLE
);
2181 if (kt
->pfrkt_ip6
!= NULL
) {
2182 _FREE((caddr_t
)kt
->pfrkt_ip6
, M_RTABLE
);
2184 if (kt
->pfrkt_shadow
!= NULL
) {
2185 pfr_destroy_ktable(kt
->pfrkt_shadow
, flushaddr
);
2187 if (kt
->pfrkt_rs
!= NULL
) {
2188 kt
->pfrkt_rs
->tables
--;
2189 pf_remove_if_empty_ruleset(kt
->pfrkt_rs
);
2191 pool_put(&pfr_ktable_pl
, kt
);
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
    int d;

    if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) {
        return d;
    }
    return strcmp(p->pfrkt_anchor, q->pfrkt_anchor);
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    /* struct pfr_ktable starts like a struct pfr_table */
    return RB_FIND(pfr_ktablehead, &pfr_ktables,
        (struct pfr_ktable *)(void *)tbl);
}
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
    struct pfr_kentry *ke = NULL;
    int match;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
        kt = kt->pfrkt_root;
    }
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return 0;
    }

    switch (af) {
    case AF_INET:
        pfr_sin.sin_addr.s_addr = a->addr32[0];
        ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
        break;
    case AF_INET6:
        bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
        ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
        break;
    default:
        break;
    }
    match = (ke && !ke->pfrke_not);
    if (match) {
        kt->pfrkt_match++;
    } else {
        kt->pfrkt_nomatch++;
    }
    return match;
}
2258 pfr_update_stats(struct pfr_ktable
*kt
, struct pf_addr
*a
, sa_family_t af
,
2259 u_int64_t len
, int dir_out
, int op_pass
, int notrule
)
2261 struct pfr_kentry
*ke
= NULL
;
2263 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2265 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
) {
2266 kt
= kt
->pfrkt_root
;
2268 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
2275 pfr_sin
.sin_addr
.s_addr
= a
->addr32
[0];
2276 ke
= (struct pfr_kentry
*)rn_match(&pfr_sin
, kt
->pfrkt_ip4
);
2277 if (ke
&& KENTRY_RNF_ROOT(ke
)) {
2283 bcopy(a
, &pfr_sin6
.sin6_addr
, sizeof(pfr_sin6
.sin6_addr
));
2284 ke
= (struct pfr_kentry
*)rn_match(&pfr_sin6
, kt
->pfrkt_ip6
);
2285 if (ke
&& KENTRY_RNF_ROOT(ke
)) {
2292 if ((ke
== NULL
|| ke
->pfrke_not
) != notrule
) {
2293 if (op_pass
!= PFR_OP_PASS
) {
2294 printf("pfr_update_stats: assertion failed.\n");
2296 op_pass
= PFR_OP_XPASS
;
2298 kt
->pfrkt_packets
[dir_out
][op_pass
]++;
2299 kt
->pfrkt_bytes
[dir_out
][op_pass
] += len
;
2300 if (ke
!= NULL
&& op_pass
!= PFR_OP_XPASS
) {
2301 ke
->pfrke_packets
[dir_out
][op_pass
]++;
2302 ke
->pfrke_bytes
[dir_out
][op_pass
] += len
;
2307 pfr_attach_table(struct pf_ruleset
*rs
, char *name
)
2309 struct pfr_ktable
*kt
, *rt
;
2310 struct pfr_table tbl
;
2311 struct pf_anchor
*ac
= rs
->anchor
;
2313 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2315 bzero(&tbl
, sizeof(tbl
));
2316 strlcpy(tbl
.pfrt_name
, name
, sizeof(tbl
.pfrt_name
));
2318 strlcpy(tbl
.pfrt_anchor
, ac
->path
, sizeof(tbl
.pfrt_anchor
));
2320 kt
= pfr_lookup_table(&tbl
);
2322 kt
= pfr_create_ktable(&tbl
, pf_calendar_time_second(), 1);
2327 bzero(tbl
.pfrt_anchor
, sizeof(tbl
.pfrt_anchor
));
2328 rt
= pfr_lookup_table(&tbl
);
2330 rt
= pfr_create_ktable(&tbl
, 0, 1);
2332 pfr_destroy_ktable(kt
, 0);
2335 pfr_insert_ktable(rt
);
2337 kt
->pfrkt_root
= rt
;
2339 pfr_insert_ktable(kt
);
2341 if (!kt
->pfrkt_refcnt
[PFR_REFCNT_RULE
]++) {
2342 pfr_setflags_ktable(kt
, kt
->pfrkt_flags
| PFR_TFLAG_REFERENCED
);
void
pfr_detach_table(struct pfr_ktable *kt)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) {
        printf("pfr_detach_table: refcount = %d.\n",
            kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
    } else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) {
        pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED);
    }
}
2361 pfr_pool_get(struct pfr_ktable
*kt
, int *pidx
, struct pf_addr
*counter
,
2362 struct pf_addr
**raddr
, struct pf_addr
**rmask
, sa_family_t af
)
2364 struct pfr_kentry
*ke
, *ke2
;
2365 struct pf_addr
*addr
;
2366 union sockaddr_union mask
;
2367 int idx
= -1, use_counter
= 0;
2369 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2371 if (af
== AF_INET
) {
2372 addr
= (struct pf_addr
*)&pfr_sin
.sin_addr
;
2373 } else if (af
== AF_INET6
) {
2374 addr
= (struct pf_addr
*)&pfr_sin6
.sin6_addr
;
2379 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
) {
2380 kt
= kt
->pfrkt_root
;
2382 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
)) {
2389 if (counter
!= NULL
&& idx
>= 0) {
2397 ke
= pfr_kentry_byidx(kt
, idx
, af
);
2399 kt
->pfrkt_nomatch
++;
2402 pfr_prepare_network(&pfr_mask
, af
, ke
->pfrke_net
);
2403 *raddr
= SUNION2PF(&ke
->pfrke_sa
, af
);
2404 *rmask
= SUNION2PF(&pfr_mask
, af
);
2407 /* is supplied address within block? */
2408 if (!PF_MATCHA(0, *raddr
, *rmask
, counter
, af
)) {
2409 /* no, go to next block in table */
2414 PF_ACPY(addr
, counter
, af
);
2416 /* use first address of block */
2417 PF_ACPY(addr
, *raddr
, af
);
2420 if (!KENTRY_NETWORK(ke
)) {
2421 /* this is a single IP address - no possible nested block */
2422 PF_ACPY(counter
, addr
, af
);
2428 /* we don't want to use a nested block */
2429 if (af
== AF_INET
) {
2430 ke2
= (struct pfr_kentry
*)rn_match(&pfr_sin
,
2432 } else if (af
== AF_INET6
) {
2433 ke2
= (struct pfr_kentry
*)rn_match(&pfr_sin6
,
2436 return -1; /* never happens */
2438 /* no need to check KENTRY_RNF_ROOT() here */
2440 /* lookup return the same block - perfect */
2441 PF_ACPY(counter
, addr
, af
);
2447 /* we need to increase the counter past the nested block */
2448 pfr_prepare_network(&mask
, AF_INET
, ke2
->pfrke_net
);
2449 PF_POOLMASK(addr
, addr
, SUNION2PF(&mask
, af
), &pfr_ffaddr
, af
);
2451 if (!PF_MATCHA(0, *raddr
, *rmask
, addr
, af
)) {
2452 /* ok, we reached the end of our main block */
2453 /* go to next block in table */
2461 static struct pfr_kentry
*
2462 pfr_kentry_byidx(struct pfr_ktable
*kt
, int idx
, int af
)
2464 struct pfr_walktree w
;
2466 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2468 bzero(&w
, sizeof(w
));
2469 w
.pfrw_op
= PFRW_POOL_GET
;
2475 (void) kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
,
2477 return w
.pfrw_kentry
;
2480 (void) kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
,
2482 return w
.pfrw_kentry
;
2489 pfr_dynaddr_update(struct pfr_ktable
*kt
, struct pfi_dynaddr
*dyn
)
2491 struct pfr_walktree w
;
2493 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
2495 bzero(&w
, sizeof(w
));
2496 w
.pfrw_op
= PFRW_DYNADDR_UPDATE
;
2499 dyn
->pfid_acnt4
= 0;
2500 dyn
->pfid_acnt6
= 0;
2501 if (!dyn
->pfid_af
|| dyn
->pfid_af
== AF_INET
) {
2502 (void) kt
->pfrkt_ip4
->rnh_walktree(kt
->pfrkt_ip4
,
2505 if (!dyn
->pfid_af
|| dyn
->pfid_af
== AF_INET6
) {
2506 (void) kt
->pfrkt_ip6
->rnh_walktree(kt
->pfrkt_ip6
,