/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*	$apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/*	$OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>
#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define	COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
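/*
 * Note on COPYIN/COPYOUT above: these routines are reached both from user
 * ioctls and from inside the kernel.  When PFR_FLAG_USERIOCTL is set, the
 * peer address is a user-space pointer and copyin()/copyout() may fault;
 * otherwise it is a kernel pointer, so a plain bcopy() (which cannot fail,
 * hence the constant 0 result) suffices.
 */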
#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof (sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof (sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af) == AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
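/*
 * Example: for an IPv4 entry AF_BITS() is 32, so ADDR_NETWORK() is true for
 * 10.0.0.0/24 (pfra_net == 24 < 32) and false for a /32 host entry.  Network
 * entries are inserted into the radix tree with a mask; host entries are not
 * (see pfr_route_kentry() below).
 */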
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		user_addr_t		 pfrw1_addr;
		user_addr_t		 pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free
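/*
 * All radix-tree traversals in this file funnel through a single callback,
 * pfr_walktree(), which dispatches on pfrw_op; the union above carries the
 * per-operation state (destination buffer, work queue, dynaddr, etc.).
 */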
#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;

static struct pool		 pfr_kentry_pl2;
static struct sockaddr_in	 pfr_sin;
static struct sockaddr_in6	 pfr_sin6;
static union sockaddr_union	 pfr_mask;
static struct pf_addr		 pfr_ffaddr;
static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t,
    int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *,
    struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead	 pfr_ktables;
static struct pfr_table		 pfr_nulltable;
static int			 pfr_ktable_cnt;
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof (pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof (pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif
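/*
 * Two kentry pools are maintained: pfr_kentry_pl for entries created from
 * the user ioctl path and pfr_kentry_pl2 for entries created from within
 * the kernel (see the intr argument of pfr_create_kentry(), recorded in
 * pfrke_intrpool so each entry is returned to the pool it came from).
 */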
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	user_addr_t		 addr = _addr;
	u_int64_t		 tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++, addr += sizeof (ad)) {
			if (COPYIN(addr, &ad, sizeof (ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}
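/*
 * Rough cost model for the heuristic above: the first loop computes
 * log = floor(log2(N)) + 2, so for a table of N = 1024 entries log is 12
 * and a request deleting more than 1024/12 (about 85) addresses takes the
 * O(N) mark-and-sweep path instead of n individual O(log N) lookups.
 */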
int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
			addr += sizeof (ad);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	u_int64_t		 tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	 w;

	SLIST_INIT(workq);
	bzero(&w, sizeof (w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	 w;

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof (sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else
		return (NULL);
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof (*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}
static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}
static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}
static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}
static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}
static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry	*p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}
static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
	struct pfr_addr	 ad;
	int		 i;

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			break;
	}
}
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof (*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof (sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof (sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
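/*
 * Example: pfr_prepare_network(&su, AF_INET, 24) yields an sin_addr of
 * htonl(0xffffff00), i.e. 255.255.255.0.  For AF_INET6 with net = 48, the
 * first 32-bit word becomes all-ones and the second htonl(0xffff0000),
 * i.e. the mask ffff:ffff:ffff:: .
 */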
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}
static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}
static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof (*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 flags = w->pfrw_flags;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof (ad)))
				return (EFAULT);
			w->pfrw_addr += sizeof (ad);
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

#if !defined(__LP64__)
			/* Initialized to avoid potential info leak to
			 * userspace */
			as._pad = 0;
#endif
			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof (as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof (as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
				return (EFAULT);
			w->pfrw_astats += sizeof (as);
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6,
			    ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0)
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			senderr(EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}
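/*
 * Tables created inside an anchor keep pfrkt_root pointing at a table of
 * the same name in the main ruleset (created on demand above); lookups on
 * an inactive anchored table fall back to that root table — see
 * pfr_match_addr() and pfr_update_stats() below.
 */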
int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags))
			return (EFAULT);
		tbl += sizeof (p->pfrkt_t);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	u_int64_t		 tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) {
			return (EFAULT);
		}
		tbl += sizeof (p->pfrkt_ts);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof (key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof (key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q, *next;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
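/*
 * Commit summary: if the shadow table carried no address list
 * (NO_ADDRESSES), only table flags/stats change.  If kt is live, the
 * shadow's entries are merged with a mark-and-sweep pass so unchanged
 * entries keep their statistics.  Otherwise kt holds no addresses and the
 * radix heads are simply swapped in wholesale via SWAP().
 */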
void
pfr_table_copyin_cleanup(struct pfr_table *tbl)
{
	tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
	tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
}
static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0)
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}
/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int	off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < (int)siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
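/*
 * Example: an anchor given as "/foo/bar" is rewritten in place to
 * "foo/bar" (leading slashes shifted off and the tail zero-filled), and
 * any anchor that is not NUL-terminated within MAXPATHLEN is rejected.
 */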
static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}
static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}
static void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable	*p, *q;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
	struct pfr_ktable	*p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}
static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof (*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}
static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}
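/*
 * The red-black tree is ordered by table name first, anchor path second.
 * Because struct pfr_ktable begins with its embedded struct pfr_table, a
 * bare pfr_table can be cast to a pfr_ktable and used directly as an
 * RB_FIND() search key (see pfr_lookup_table() below).
 */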
static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl));
}
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}
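/*
 * pfr_sin/pfr_sin6 above are static scratch sockaddrs reused by every
 * lookup; this is safe only because all callers run with pf_lock held,
 * as the LCK_MTX_ASSERT() enforces.
 */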
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&tbl, sizeof (tbl));
	strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
void
pfr_detach_table(struct pfr_ktable *kt)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		else
			return (-1); /* never happens */
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}
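/*
 * pfr_pool_get() implements round-robin address selection over a table,
 * e.g. for address pools backed by a table: *pidx names the current
 * block, counter the current address within it.  The inner loop skips
 * nested sub-blocks by advancing the counter past them, and an exhausted
 * block moves the walk on to the next block index.
 */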
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	 w;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#if INET
	case AF_INET:
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#if INET6
	case AF_INET6:
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	 w;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
}