/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist) \
    do { \
        if ((flags & ~(oklist)) & \
            PFR_FLAG_ALLMASK) \
            return (EINVAL); \
    } while (0)

#define COPYIN(from, to, size, flags) \
    ((flags & PFR_FLAG_USERIOCTL) ? \
    copyin((from), (to), (size)) : \
    (bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags) \
    ((flags & PFR_FLAG_USERIOCTL) ? \
    copyout((from), (to), (size)) : \
    (bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
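
/*
 * COPYIN/COPYOUT dispatch on PFR_FLAG_USERIOCTL: requests that arrive
 * via ioctl(2) carry user-space addresses and go through copyin/copyout,
 * while kernel-internal callers pass kernel memory and get a plain
 * bcopy (which cannot fail, hence the trailing ", 0").
 */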

#define FILLIN_SIN(sin, addr) \
    do { \
        (sin).sin_len = sizeof (sin); \
        (sin).sin_family = AF_INET; \
        (sin).sin_addr = (addr); \
    } while (0)

#define FILLIN_SIN6(sin6, addr) \
    do { \
        (sin6).sin6_len = sizeof (sin6); \
        (sin6).sin6_family = AF_INET6; \
        (sin6).sin6_addr = (addr); \
    } while (0)

#define SWAP(type, a1, a2) \
    do { \
        type tmp = a1; \
        a1 = a2; \
        a2 = tmp; \
    } while (0)

#define SUNION2PF(su, af) (((af) == AF_INET) ? \
    (struct pf_addr *)&(su)->sin.sin_addr : \
    (struct pf_addr *)&(su)->sin6.sin6_addr)

#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128)
#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
    ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES (-1)
#define ENQUEUE_UNMARKED_ONLY (1)
#define INVERT_NEG_FLAG (1)
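
/*
 * NO_ADDRESSES marks a shadow table that was defined without an address
 * list (pfr_ina_define without PFR_FLAG_ADDRSTOO); the other two name
 * boolean arguments of pfr_enqueue_addrs() and pfr_clstats_kentries().
 */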

struct pfr_walktree {
    enum pfrw_op {
        PFRW_MARK,
        PFRW_SWEEP,
        PFRW_ENQUEUE,
        PFRW_GET_ADDRS,
        PFRW_GET_ASTATS,
        PFRW_POOL_GET,
        PFRW_DYNADDR_UPDATE
    } pfrw_op;
    union {
        user_addr_t pfrw1_addr;
        user_addr_t pfrw1_astats;
        struct pfr_kentryworkq *pfrw1_workq;
        struct pfr_kentry *pfrw1_kentry;
        struct pfi_dynaddr *pfrw1_dyn;
    } pfrw_1;
    int pfrw_free;
    int pfrw_flags;
};
#define pfrw_addr pfrw_1.pfrw1_addr
#define pfrw_astats pfrw_1.pfrw1_astats
#define pfrw_workq pfrw_1.pfrw1_workq
#define pfrw_kentry pfrw_1.pfrw1_kentry
#define pfrw_dyn pfrw_1.pfrw1_dyn
#define pfrw_cnt pfrw_free
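/*
 * Note that pfrw_cnt aliases pfrw_free: the GET operations count
 * remaining output slots down, PFRW_ENQUEUE counts collected entries
 * up, and PFRW_POOL_GET counts down from a caller-chosen index.
 */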

#define senderr(e) do { rv = (e); goto _bad; } while (0)

struct pool pfr_ktable_pl;
struct pool pfr_kentry_pl;

static struct pool pfr_kentry_pl2;
static struct sockaddr_in pfr_sin;
static struct sockaddr_in6 pfr_sin6;
static union sockaddr_union pfr_mask;
static struct pf_addr pfr_ffaddr;

static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead pfr_ktables;
static struct pfr_table pfr_nulltable;
static int pfr_ktable_cnt;

void
pfr_initialize(void)
{
    pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
        "pfrktable", NULL);
    pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
        "pfrkentry", NULL);
    pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
        "pfrkentry2", NULL);

    pfr_sin.sin_len = sizeof(pfr_sin);
    pfr_sin.sin_family = AF_INET;
    pfr_sin6.sin6_len = sizeof(pfr_sin6);
    pfr_sin6.sin6_family = AF_INET6;

    memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
    pool_destroy(&pfr_ktable_pl);
    pool_destroy(&pfr_kentry_pl);
    pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentryworkq workq;

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    pfr_enqueue_addrs(kt, &workq, ndel, 0);

    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_remove_kentries(kt, &workq);
        if (kt->pfrkt_cnt) {
            printf("pfr_clr_addrs: corruption detected (%d).\n",
                kt->pfrkt_cnt);
            kt->pfrkt_cnt = 0;
        }
    }
    return 0;
}

int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
    struct pfr_ktable *kt, *tmpkt;
    struct pfr_kentryworkq workq;
    struct pfr_kentry *p, *q;
    struct pfr_addr ad;
    int i, rv, xadd = 0;
    user_addr_t addr = _addr;
    u_int64_t tzero = pf_calendar_time_second();

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_FEEDBACK);
    if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
    if (tmpkt == NULL) {
        return ENOMEM;
    }
    SLIST_INIT(&workq);
    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        p = pfr_lookup_addr(kt, &ad, 1);
        q = pfr_lookup_addr(tmpkt, &ad, 1);
        if (flags & PFR_FLAG_FEEDBACK) {
            if (q != NULL) {
                ad.pfra_fback = PFR_FB_DUPLICATE;
            } else if (p == NULL) {
                ad.pfra_fback = PFR_FB_ADDED;
            } else if (p->pfrke_not != ad.pfra_not) {
                ad.pfra_fback = PFR_FB_CONFLICT;
            } else {
                ad.pfra_fback = PFR_FB_NONE;
            }
        }
        if (p == NULL && q == NULL) {
            p = pfr_create_kentry(&ad,
                !(flags & PFR_FLAG_USERIOCTL));
            if (p == NULL) {
                senderr(ENOMEM);
            }
            if (pfr_route_kentry(tmpkt, p)) {
                pfr_destroy_kentry(p);
                ad.pfra_fback = PFR_FB_NONE;
            } else {
                SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
                xadd++;
            }
        }
        if (flags & PFR_FLAG_FEEDBACK) {
            if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
                senderr(EFAULT);
            }
        }
    }
    pfr_clean_node_mask(tmpkt, &workq);
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_insert_kentries(kt, &workq, tzero);
    } else {
        pfr_destroy_kentries(&workq);
    }
    if (nadd != NULL) {
        *nadd = xadd;
    }
    pfr_destroy_ktable(tmpkt, 0);
    return 0;
_bad:
    pfr_clean_node_mask(tmpkt, &workq);
    pfr_destroy_kentries(&workq);
    if (flags & PFR_FLAG_FEEDBACK) {
        pfr_reset_feedback(_addr, size, flags);
    }
    pfr_destroy_ktable(tmpkt, 0);
    return rv;
}

int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentryworkq workq;
    struct pfr_kentry *p;
    struct pfr_addr ad;
    user_addr_t addr = _addr;
    int i, rv, xdel = 0, log = 1;

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_FEEDBACK);
    if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    /*
     * there are two algorithms to choose from here.
     * with:
     * n: number of addresses to delete
     * N: number of addresses in the table
     *
     * one is O(N) and is better for large 'n'
     * one is O(n*LOG(N)) and is better for small 'n'
     *
     * the following code tries to decide which one is best.
     */
    for (i = kt->pfrkt_cnt; i > 0; i >>= 1) {
        log++;
    }
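    /*
     * log is now roughly log2(N) + 2, so the full scan is chosen once
     * n exceeds about N/log2(N); e.g. for N = 65536 (log = 18), deleting
     * more than 65536 / 18 = 3640 addresses marks the whole table
     * instead of doing per-address lookups.
     */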
    if (size > kt->pfrkt_cnt / log) {
        /* full table scan */
        pfr_mark_addrs(kt);
    } else {
        /* iterate over addresses to delete */
        for (i = 0; i < size; i++, addr += sizeof(ad)) {
            if (COPYIN(addr, &ad, sizeof(ad), flags)) {
                return EFAULT;
            }
            if (pfr_validate_addr(&ad)) {
                return EINVAL;
            }
            p = pfr_lookup_addr(kt, &ad, 1);
            if (p != NULL) {
                p->pfrke_mark = 0;
            }
        }
    }
    SLIST_INIT(&workq);
    for (addr = _addr, i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        p = pfr_lookup_addr(kt, &ad, 1);
        if (flags & PFR_FLAG_FEEDBACK) {
            if (p == NULL) {
                ad.pfra_fback = PFR_FB_NONE;
            } else if (p->pfrke_not != ad.pfra_not) {
                ad.pfra_fback = PFR_FB_CONFLICT;
            } else if (p->pfrke_mark) {
                ad.pfra_fback = PFR_FB_DUPLICATE;
            } else {
                ad.pfra_fback = PFR_FB_DELETED;
            }
        }
        if (p != NULL && p->pfrke_not == ad.pfra_not &&
            !p->pfrke_mark) {
            p->pfrke_mark = 1;
            SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
            xdel++;
        }
        if (flags & PFR_FLAG_FEEDBACK) {
            if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
                senderr(EFAULT);
            }
        }
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_remove_kentries(kt, &workq);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
_bad:
    if (flags & PFR_FLAG_FEEDBACK) {
        pfr_reset_feedback(_addr, size, flags);
    }
    return rv;
}

int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
    struct pfr_ktable *kt, *tmpkt;
    struct pfr_kentryworkq addq, delq, changeq;
    struct pfr_kentry *p, *q;
    struct pfr_addr ad;
    user_addr_t addr = _addr;
    int i, rv, xadd = 0, xdel = 0, xchange = 0;
    u_int64_t tzero = pf_calendar_time_second();

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_FEEDBACK);
    if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
        PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
        return EPERM;
    }
    tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
    if (tmpkt == NULL) {
        return ENOMEM;
    }
    pfr_mark_addrs(kt);
    SLIST_INIT(&addq);
    SLIST_INIT(&delq);
    SLIST_INIT(&changeq);
    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        ad.pfra_fback = PFR_FB_NONE;
        p = pfr_lookup_addr(kt, &ad, 1);
        if (p != NULL) {
            if (p->pfrke_mark) {
                ad.pfra_fback = PFR_FB_DUPLICATE;
                goto _skip;
            }
            p->pfrke_mark = 1;
            if (p->pfrke_not != ad.pfra_not) {
                SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
                ad.pfra_fback = PFR_FB_CHANGED;
                xchange++;
            }
        } else {
            q = pfr_lookup_addr(tmpkt, &ad, 1);
            if (q != NULL) {
                ad.pfra_fback = PFR_FB_DUPLICATE;
                goto _skip;
            }
            p = pfr_create_kentry(&ad,
                !(flags & PFR_FLAG_USERIOCTL));
            if (p == NULL) {
                senderr(ENOMEM);
            }
            if (pfr_route_kentry(tmpkt, p)) {
                pfr_destroy_kentry(p);
                ad.pfra_fback = PFR_FB_NONE;
            } else {
                SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
                ad.pfra_fback = PFR_FB_ADDED;
                xadd++;
            }
        }
_skip:
        if (flags & PFR_FLAG_FEEDBACK) {
            if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
                senderr(EFAULT);
            }
        }
    }
    pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
    if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
        if (*size2 < size + xdel) {
            *size2 = size + xdel;
            senderr(0);
        }
        i = 0;
        addr = _addr + size;
        SLIST_FOREACH(p, &delq, pfrke_workq) {
            pfr_copyout_addr(&ad, p);
            ad.pfra_fback = PFR_FB_DELETED;
            if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
                senderr(EFAULT);
            }
            addr += sizeof(ad);
            i++;
        }
    }
    pfr_clean_node_mask(tmpkt, &addq);
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_insert_kentries(kt, &addq, tzero);
        pfr_remove_kentries(kt, &delq);
        pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
    } else {
        pfr_destroy_kentries(&addq);
    }
    if (nadd != NULL) {
        *nadd = xadd;
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    if (nchange != NULL) {
        *nchange = xchange;
    }
    if ((flags & PFR_FLAG_FEEDBACK) && size2) {
        *size2 = size + xdel;
    }
    pfr_destroy_ktable(tmpkt, 0);
    return 0;
_bad:
    pfr_clean_node_mask(tmpkt, &addq);
    pfr_destroy_kentries(&addq);
    if (flags & PFR_FLAG_FEEDBACK) {
        pfr_reset_feedback(_addr, size, flags);
    }
    pfr_destroy_ktable(tmpkt, 0);
    return rv;
}

int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentry *p;
    struct pfr_addr ad;
    int i, xmatch = 0;

    ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
    if (pfr_validate_table(tbl, 0, 0)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }

    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            return EFAULT;
        }
        if (pfr_validate_addr(&ad)) {
            return EINVAL;
        }
        if (ADDR_NETWORK(&ad)) {
            return EINVAL;
        }
        p = pfr_lookup_addr(kt, &ad, 0);
        if (flags & PFR_FLAG_REPLACE) {
            pfr_copyout_addr(&ad, p);
        }
        ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
            (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
        if (p != NULL && !p->pfrke_not) {
            xmatch++;
        }
        if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
            return EFAULT;
        }
    }
    if (nmatch != NULL) {
        *nmatch = xmatch;
    }
    return 0;
}

int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
    struct pfr_ktable *kt;
    struct pfr_walktree w;
    int rv;

    ACCEPT_FLAGS(flags, 0);
    if (pfr_validate_table(tbl, 0, 0)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_cnt > *size) {
        *size = kt->pfrkt_cnt;
        return 0;
    }

    bzero(&w, sizeof(w));
    w.pfrw_op = PFRW_GET_ADDRS;
    w.pfrw_addr = addr;
    w.pfrw_free = kt->pfrkt_cnt;
    w.pfrw_flags = flags;
    rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
    if (!rv) {
        rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w);
    }
    if (rv) {
        return rv;
    }

    if (w.pfrw_free) {
        printf("pfr_get_addrs: corruption detected (%d).\n",
            w.pfrw_free);
        return ENOTTY;
    }
    *size = kt->pfrkt_cnt;
    return 0;
}

int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
    struct pfr_ktable *kt;
    struct pfr_walktree w;
    struct pfr_kentryworkq workq;
    int rv;
    u_int64_t tzero = pf_calendar_time_second();

    /* XXX PFR_FLAG_CLSTATS disabled */
    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
    if (pfr_validate_table(tbl, 0, 0)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    if (kt->pfrkt_cnt > *size) {
        *size = kt->pfrkt_cnt;
        return 0;
    }

    bzero(&w, sizeof(w));
    w.pfrw_op = PFRW_GET_ASTATS;
    w.pfrw_astats = addr;
    w.pfrw_free = kt->pfrkt_cnt;
    w.pfrw_flags = flags;
    rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
    if (!rv) {
        rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w);
    }
    if (!rv && (flags & PFR_FLAG_CLSTATS)) {
        pfr_enqueue_addrs(kt, &workq, NULL, 0);
        pfr_clstats_kentries(&workq, tzero, 0);
    }
    if (rv) {
        return rv;
    }

    if (w.pfrw_free) {
        printf("pfr_get_astats: corruption detected (%d).\n",
            w.pfrw_free);
        return ENOTTY;
    }
    *size = kt->pfrkt_cnt;
    return 0;
}

int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
    struct pfr_ktable *kt;
    struct pfr_kentryworkq workq;
    struct pfr_kentry *p;
    struct pfr_addr ad;
    user_addr_t addr = _addr;
    int i, rv, xzero = 0;

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_FEEDBACK);
    if (pfr_validate_table(tbl, 0, 0)) {
        return EINVAL;
    }
    kt = pfr_lookup_table(tbl);
    if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
        return ESRCH;
    }
    SLIST_INIT(&workq);
    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        p = pfr_lookup_addr(kt, &ad, 1);
        if (flags & PFR_FLAG_FEEDBACK) {
            ad.pfra_fback = (p != NULL) ?
                PFR_FB_CLEARED : PFR_FB_NONE;
            if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
                senderr(EFAULT);
            }
        }
        if (p != NULL) {
            SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
            xzero++;
        }
    }

    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_clstats_kentries(&workq, 0, 0);
    }
    if (nzero != NULL) {
        *nzero = xzero;
    }
    return 0;
_bad:
    if (flags & PFR_FLAG_FEEDBACK) {
        pfr_reset_feedback(_addr, size, flags);
    }
    return rv;
}

static int
pfr_validate_addr(struct pfr_addr *ad)
{
    int i;

    switch (ad->pfra_af) {
#if INET
    case AF_INET:
        if (ad->pfra_net > 32) {
            return -1;
        }
        break;
#endif /* INET */
#if INET6
    case AF_INET6:
        if (ad->pfra_net > 128) {
            return -1;
        }
        break;
#endif /* INET6 */
    default:
        return -1;
    }
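    /*
     * The casts below treat the leading address union as a byte array;
     * every bit past the prefix must be zero, e.g. an IPv4 /24 entry may
     * set only its first three address bytes, and bytes 3..15 of pfra_u
     * must be clear.
     */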
    if (ad->pfra_net < 128 &&
        (((caddr_t)ad)[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8)))) {
        return -1;
    }
    for (i = (ad->pfra_net + 7) / 8; i < (int)sizeof(ad->pfra_u); i++) {
        if (((caddr_t)ad)[i]) {
            return -1;
        }
    }
    if (ad->pfra_not && ad->pfra_not != 1) {
        return -1;
    }
    if (ad->pfra_fback) {
        return -1;
    }
    return 0;
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
    struct pfr_walktree w;

    SLIST_INIT(workq);
    bzero(&w, sizeof(w));
    w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
    w.pfrw_workq = workq;
    if (kt->pfrkt_ip4 != NULL) {
        if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
            pfr_walktree, &w)) {
            printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
        }
    }
    if (kt->pfrkt_ip6 != NULL) {
        if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w)) {
            printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
        }
    }
    if (naddr != NULL) {
        *naddr = w.pfrw_cnt;
    }
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
    struct pfr_walktree w;

    bzero(&w, sizeof(w));
    w.pfrw_op = PFRW_MARK;
    if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) {
        printf("pfr_mark_addrs: IPv4 walktree failed.\n");
    }
    if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) {
        printf("pfr_mark_addrs: IPv6 walktree failed.\n");
    }
}


static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
    union sockaddr_union sa, mask;
    struct radix_node_head *head;
    struct pfr_kentry *ke;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&sa, sizeof(sa));
    if (ad->pfra_af == AF_INET) {
        FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
        head = kt->pfrkt_ip4;
    } else if (ad->pfra_af == AF_INET6) {
        FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
        head = kt->pfrkt_ip6;
    } else {
        return NULL;
    }
    if (ADDR_NETWORK(ad)) {
        pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
        ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
    } else {
        ke = (struct pfr_kentry *)rn_match(&sa, head);
        if (ke && KENTRY_RNF_ROOT(ke)) {
            ke = NULL;
        }
        if (exact && ke && KENTRY_NETWORK(ke)) {
            ke = NULL;
        }
    }
    return ke;
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
    struct pfr_kentry *ke;

    if (intr) {
        ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
    } else {
        ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
    }
    if (ke == NULL) {
        return NULL;
    }
    bzero(ke, sizeof(*ke));

    if (ad->pfra_af == AF_INET) {
        FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
    } else if (ad->pfra_af == AF_INET6) {
        FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
    }
    ke->pfrke_af = ad->pfra_af;
    ke->pfrke_net = ad->pfra_net;
    ke->pfrke_not = ad->pfra_not;
    ke->pfrke_intrpool = intr;
    return ke;
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
    struct pfr_kentry *p, *q;

    for (p = SLIST_FIRST(workq); p != NULL; p = q) {
        q = SLIST_NEXT(p, pfrke_workq);
        pfr_destroy_kentry(p);
    }
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
    if (ke->pfrke_intrpool) {
        pool_put(&pfr_kentry_pl2, ke);
    } else {
        pool_put(&pfr_kentry_pl, ke);
    }
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
    struct pfr_kentry *p;
    int rv, n = 0;

    SLIST_FOREACH(p, workq, pfrke_workq) {
        rv = pfr_route_kentry(kt, p);
        if (rv) {
            printf("pfr_insert_kentries: cannot route entry "
                "(code=%d).\n", rv);
            break;
        }
        p->pfrke_tzero = tzero;
        n++;
    }
    kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
    struct pfr_kentry *p;
    int rv;

    p = pfr_lookup_addr(kt, ad, 1);
    if (p != NULL) {
        return 0;
    }
    p = pfr_create_kentry(ad, 1);
    if (p == NULL) {
        return EINVAL;
    }

    rv = pfr_route_kentry(kt, p);
    if (rv) {
        return rv;
    }

    p->pfrke_tzero = tzero;
    kt->pfrkt_cnt++;

    return 0;
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
    struct pfr_kentry *p;
    int n = 0;

    SLIST_FOREACH(p, workq, pfrke_workq) {
        pfr_unroute_kentry(kt, p);
        n++;
    }
    kt->pfrkt_cnt -= n;
    pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
    struct pfr_kentry *p;

    SLIST_FOREACH(p, workq, pfrke_workq)
        pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
    struct pfr_kentry *p;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    SLIST_FOREACH(p, workq, pfrke_workq) {
        if (negchange) {
            p->pfrke_not = !p->pfrke_not;
        }
        bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
        bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
        p->pfrke_tzero = tzero;
    }
}

static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
    struct pfr_addr ad;
    int i;

    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            break;
        }
        ad.pfra_fback = PFR_FB_NONE;
        if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
            break;
        }
    }
}

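/*
 * Build a netmask sockaddr from an address family and a prefix length,
 * e.g. (AF_INET, 24) yields 255.255.255.0 and (AF_INET6, 48) yields
 * ffff:ffff:ffff::; a prefix of 0 produces the all-zero mask.
 */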
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
    int i;

    bzero(sa, sizeof(*sa));
    if (af == AF_INET) {
        sa->sin.sin_len = sizeof(sa->sin);
        sa->sin.sin_family = AF_INET;
        sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0;
    } else if (af == AF_INET6) {
        sa->sin6.sin6_len = sizeof(sa->sin6);
        sa->sin6.sin6_family = AF_INET6;
        for (i = 0; i < 4; i++) {
            if (net <= 32) {
                sa->sin6.sin6_addr.s6_addr32[i] =
                    net ? htonl(-1 << (32 - net)) : 0;
                break;
            }
            sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
            net -= 32;
        }
    }
}

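/*
 * Insert the entry into the table's per-family radix tree; network
 * entries carry an explicit mask while host entries pass a NULL mask,
 * i.e. a full-length key.
 */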
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
    union sockaddr_union mask;
    struct radix_node *rn;
    struct radix_node_head *head;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
    if (ke->pfrke_af == AF_INET) {
        head = kt->pfrkt_ip4;
    } else if (ke->pfrke_af == AF_INET6) {
        head = kt->pfrkt_ip6;
    } else {
        return -1;
    }

    if (KENTRY_NETWORK(ke)) {
        pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
        rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
    } else {
        rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
    }

    return rn == NULL ? -1 : 0;
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
    union sockaddr_union mask;
    struct radix_node *rn;
    struct radix_node_head *head;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (ke->pfrke_af == AF_INET) {
        head = kt->pfrkt_ip4;
    } else if (ke->pfrke_af == AF_INET6) {
        head = kt->pfrkt_ip6;
    } else {
        return -1;
    }

    if (KENTRY_NETWORK(ke)) {
        pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
        rn = rn_delete(&ke->pfrke_sa, &mask, head);
    } else {
        rn = rn_delete(&ke->pfrke_sa, NULL, head);
    }

    if (rn == NULL) {
        printf("pfr_unroute_kentry: delete failed.\n");
        return -1;
    }
    return 0;
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
    bzero(ad, sizeof(*ad));
    if (ke == NULL) {
        return;
    }
    ad->pfra_af = ke->pfrke_af;
    ad->pfra_net = ke->pfrke_net;
    ad->pfra_not = ke->pfrke_not;
    if (ad->pfra_af == AF_INET) {
        ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
    } else if (ad->pfra_af == AF_INET6) {
        ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
    }
}

static int
pfr_walktree(struct radix_node *rn, void *arg)
{
    struct pfr_kentry *ke = (struct pfr_kentry *)rn;
    struct pfr_walktree *w = arg;
    int flags = w->pfrw_flags;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    switch (w->pfrw_op) {
    case PFRW_MARK:
        ke->pfrke_mark = 0;
        break;
    case PFRW_SWEEP:
        if (ke->pfrke_mark) {
            break;
        }
        /* FALLTHROUGH */
    case PFRW_ENQUEUE:
        SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
        w->pfrw_cnt++;
        break;
    case PFRW_GET_ADDRS:
        if (w->pfrw_free-- > 0) {
            struct pfr_addr ad;

            pfr_copyout_addr(&ad, ke);
            if (copyout(&ad, w->pfrw_addr, sizeof(ad))) {
                return EFAULT;
            }
            w->pfrw_addr += sizeof(ad);
        }
        break;
    case PFRW_GET_ASTATS:
        if (w->pfrw_free-- > 0) {
            struct pfr_astats as;

            pfr_copyout_addr(&as.pfras_a, ke);

#if !defined(__LP64__)
            /* Initialized to avoid potential info leak to
             * userspace */
            as._pad = 0;
#endif
            bcopy(ke->pfrke_packets, as.pfras_packets,
                sizeof(as.pfras_packets));
            bcopy(ke->pfrke_bytes, as.pfras_bytes,
                sizeof(as.pfras_bytes));
            as.pfras_tzero = ke->pfrke_tzero;

            if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags)) {
                return EFAULT;
            }
            w->pfrw_astats += sizeof(as);
        }
        break;
    case PFRW_POOL_GET:
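        /*
         * pfrw_cnt is preloaded by the caller (pfr_kentry_byidx) with
         * the target index; the walk stops at the matching non-negated
         * entry.
         */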
        if (ke->pfrke_not) {
            break; /* negative entries are ignored */
        }
        if (!w->pfrw_cnt--) {
            w->pfrw_kentry = ke;
            return 1; /* finish search */
        }
        break;
    case PFRW_DYNADDR_UPDATE:
        if (ke->pfrke_af == AF_INET) {
            if (w->pfrw_dyn->pfid_acnt4++ > 0) {
                break;
            }
            pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
            w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
                &ke->pfrke_sa, AF_INET);
            w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
                &pfr_mask, AF_INET);
        } else if (ke->pfrke_af == AF_INET6) {
            if (w->pfrw_dyn->pfid_acnt6++ > 0) {
                break;
            }
            pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
            w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
                &ke->pfrke_sa, AF_INET6);
            w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
                &pfr_mask, AF_INET6);
        }
        break;
    }
    return 0;
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p;
    int xdel = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_ALLRSETS);
    if (pfr_fix_anchor(filter->pfrt_anchor)) {
        return EINVAL;
    }
    if (pfr_table_count(filter, flags) < 0) {
        return ENOENT;
    }

    SLIST_INIT(&workq);
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (pfr_skip_table(filter, p, flags)) {
            continue;
        }
        if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) {
            continue;
        }
        if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
            continue;
        }
        p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        xdel++;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_setflags_ktables(&workq);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
}

int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
    struct pfr_ktableworkq addq, changeq;
    struct pfr_ktable *p, *q, *r, key;
    int i, rv, xadd = 0;
    u_int64_t tzero = pf_calendar_time_second();

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    SLIST_INIT(&addq);
    SLIST_INIT(&changeq);
    for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
        if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
            senderr(EFAULT);
        }
        pfr_table_copyin_cleanup(&key.pfrkt_t);
        if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
            flags & PFR_FLAG_USERIOCTL)) {
            senderr(EINVAL);
        }
        key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
        p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
        if (p == NULL) {
            p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
            if (p == NULL) {
                senderr(ENOMEM);
            }
            SLIST_FOREACH(q, &addq, pfrkt_workq) {
                if (!pfr_ktable_compare(p, q)) {
                    goto _skip;
                }
            }
            SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
            xadd++;
            if (!key.pfrkt_anchor[0]) {
                goto _skip;
            }

            /* find or create root table */
            bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
            r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
            if (r != NULL) {
                p->pfrkt_root = r;
                goto _skip;
            }
            SLIST_FOREACH(q, &addq, pfrkt_workq) {
                if (!pfr_ktable_compare(&key, q)) {
                    p->pfrkt_root = q;
                    goto _skip;
                }
            }
            key.pfrkt_flags = 0;
            r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
            if (r == NULL) {
                senderr(ENOMEM);
            }
            SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
            p->pfrkt_root = r;
        } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
            SLIST_FOREACH(q, &changeq, pfrkt_workq)
                if (!pfr_ktable_compare(&key, q)) {
                    goto _skip;
                }
            p->pfrkt_nflags = (p->pfrkt_flags &
                ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
            SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
            xadd++;
        }
_skip:
        ;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_insert_ktables(&addq);
        pfr_setflags_ktables(&changeq);
    } else {
        pfr_destroy_ktables(&addq, 0);
    }
    if (nadd != NULL) {
        *nadd = xadd;
    }
    return 0;
_bad:
    pfr_destroy_ktables(&addq, 0);
    return rv;
}

int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p, *q, key;
    int i, xdel = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    SLIST_INIT(&workq);
    for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
        if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
            return EFAULT;
        }
        pfr_table_copyin_cleanup(&key.pfrkt_t);
        if (pfr_validate_table(&key.pfrkt_t, 0,
            flags & PFR_FLAG_USERIOCTL)) {
            return EINVAL;
        }
        p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
        if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
            SLIST_FOREACH(q, &workq, pfrkt_workq)
                if (!pfr_ktable_compare(p, q)) {
                    goto _skip;
                }
            p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
            SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
            xdel++;
        }
_skip:
        ;
    }

    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_setflags_ktables(&workq);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
}

int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
    struct pfr_ktable *p;
    int n, nn;

    ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
    if (pfr_fix_anchor(filter->pfrt_anchor)) {
        return EINVAL;
    }
    n = nn = pfr_table_count(filter, flags);
    if (n < 0) {
        return ENOENT;
    }
    if (n > *size) {
        *size = n;
        return 0;
    }
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (pfr_skip_table(filter, p, flags)) {
            continue;
        }
        if (n-- <= 0) {
            continue;
        }
        if (COPYOUT(&p->pfrkt_t, tbl, sizeof(p->pfrkt_t), flags)) {
            return EFAULT;
        }
        tbl += sizeof(p->pfrkt_t);
    }
    if (n) {
        printf("pfr_get_tables: corruption detected (%d).\n", n);
        return ENOTTY;
    }
    *size = nn;
    return 0;
}

int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
    struct pfr_ktable *p;
    struct pfr_ktableworkq workq;
    int n, nn;
    u_int64_t tzero = pf_calendar_time_second();

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    /* XXX PFR_FLAG_CLSTATS disabled */
    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
    if (pfr_fix_anchor(filter->pfrt_anchor)) {
        return EINVAL;
    }
    n = nn = pfr_table_count(filter, flags);
    if (n < 0) {
        return ENOENT;
    }
    if (n > *size) {
        *size = n;
        return 0;
    }
    SLIST_INIT(&workq);
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (pfr_skip_table(filter, p, flags)) {
            continue;
        }
        if (n-- <= 0) {
            continue;
        }
        if (COPYOUT(&p->pfrkt_ts, tbl, sizeof(p->pfrkt_ts), flags)) {
            return EFAULT;
        }
        tbl += sizeof(p->pfrkt_ts);
        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
    }
    if (flags & PFR_FLAG_CLSTATS) {
        pfr_clstats_ktables(&workq, tzero,
            flags & PFR_FLAG_ADDRSTOO);
    }
    if (n) {
        printf("pfr_get_tstats: corruption detected (%d).\n", n);
        return ENOTTY;
    }
    *size = nn;
    return 0;
}

int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p, key;
    int i, xzero = 0;
    u_int64_t tzero = pf_calendar_time_second();

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
        PFR_FLAG_ADDRSTOO);
    SLIST_INIT(&workq);
    for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
        if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
            return EFAULT;
        }
        pfr_table_copyin_cleanup(&key.pfrkt_t);
        if (pfr_validate_table(&key.pfrkt_t, 0, 0)) {
            return EINVAL;
        }
        p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
        if (p != NULL) {
            SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
            xzero++;
        }
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
    }
    if (nzero != NULL) {
        *nzero = xzero;
    }
    return 0;
}

int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p, *q, key;
    int i, xchange = 0, xdel = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    if ((setflag & ~PFR_TFLAG_USRMASK) ||
        (clrflag & ~PFR_TFLAG_USRMASK) ||
        (setflag & clrflag)) {
        return EINVAL;
    }
    SLIST_INIT(&workq);
    for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
        if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
            return EFAULT;
        }
        pfr_table_copyin_cleanup(&key.pfrkt_t);
        if (pfr_validate_table(&key.pfrkt_t, 0,
            flags & PFR_FLAG_USERIOCTL)) {
            return EINVAL;
        }
        p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
        if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
            p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
                ~clrflag;
            if (p->pfrkt_nflags == p->pfrkt_flags) {
                goto _skip;
            }
            SLIST_FOREACH(q, &workq, pfrkt_workq)
                if (!pfr_ktable_compare(p, q)) {
                    goto _skip;
                }
            SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
            if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
                (clrflag & PFR_TFLAG_PERSIST) &&
                !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) {
                xdel++;
            } else {
                xchange++;
            }
        }
_skip:
        ;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_setflags_ktables(&workq);
    }
    if (nchange != NULL) {
        *nchange = xchange;
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p;
    struct pf_ruleset *rs;
    int xdel = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
    rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
    if (rs == NULL) {
        return ENOMEM;
    }
    SLIST_INIT(&workq);
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
            pfr_skip_table(trs, p, 0)) {
            continue;
        }
        p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        xdel++;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_setflags_ktables(&workq);
        if (ticket != NULL) {
            *ticket = ++rs->tticket;
        }
        rs->topen = 1;
    } else {
        pf_remove_if_empty_ruleset(rs);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
}

int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
    struct pfr_ktableworkq tableq;
    struct pfr_kentryworkq addrq;
    struct pfr_ktable *kt, *rt, *shadow, key;
    struct pfr_kentry *p;
    struct pfr_addr ad;
    struct pf_ruleset *rs;
    int i, rv, xadd = 0, xaddr = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
    if (size && !(flags & PFR_FLAG_ADDRSTOO)) {
        return EINVAL;
    }
    if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
        flags & PFR_FLAG_USERIOCTL)) {
        return EINVAL;
    }
    rs = pf_find_ruleset(tbl->pfrt_anchor);
    if (rs == NULL || !rs->topen || ticket != rs->tticket) {
        return EBUSY;
    }
    tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
    SLIST_INIT(&tableq);
    kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
    if (kt == NULL) {
        kt = pfr_create_ktable(tbl, 0, 1);
        if (kt == NULL) {
            return ENOMEM;
        }
        SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
        xadd++;
        if (!tbl->pfrt_anchor[0]) {
            goto _skip;
        }

        /* find or create root table */
        bzero(&key, sizeof(key));
        strlcpy(key.pfrkt_name, tbl->pfrt_name,
            sizeof(key.pfrkt_name));
        rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
        if (rt != NULL) {
            kt->pfrkt_root = rt;
            goto _skip;
        }
        rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
        if (rt == NULL) {
            pfr_destroy_ktables(&tableq, 0);
            return ENOMEM;
        }
        SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
        kt->pfrkt_root = rt;
    } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) {
        xadd++;
    }
_skip:
    shadow = pfr_create_ktable(tbl, 0, 0);
    if (shadow == NULL) {
        pfr_destroy_ktables(&tableq, 0);
        return ENOMEM;
    }
    SLIST_INIT(&addrq);
    for (i = 0; i < size; i++, addr += sizeof(ad)) {
        if (COPYIN(addr, &ad, sizeof(ad), flags)) {
            senderr(EFAULT);
        }
        if (pfr_validate_addr(&ad)) {
            senderr(EINVAL);
        }
        if (pfr_lookup_addr(shadow, &ad, 1) != NULL) {
            continue;
        }
        p = pfr_create_kentry(&ad, 0);
        if (p == NULL) {
            senderr(ENOMEM);
        }
        if (pfr_route_kentry(shadow, p)) {
            pfr_destroy_kentry(p);
            continue;
        }
        SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
        xaddr++;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        if (kt->pfrkt_shadow != NULL) {
            pfr_destroy_ktable(kt->pfrkt_shadow, 1);
        }
        kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
        pfr_insert_ktables(&tableq);
        shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
            xaddr : NO_ADDRESSES;
        kt->pfrkt_shadow = shadow;
    } else {
        pfr_clean_node_mask(shadow, &addrq);
        pfr_destroy_ktable(shadow, 0);
        pfr_destroy_ktables(&tableq, 0);
        pfr_destroy_kentries(&addrq);
    }
    if (nadd != NULL) {
        *nadd = xadd;
    }
    if (naddr != NULL) {
        *naddr = xaddr;
    }
    return 0;
_bad:
    pfr_destroy_ktable(shadow, 0);
    pfr_destroy_ktables(&tableq, 0);
    pfr_destroy_kentries(&addrq);
    return rv;
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
    struct pfr_ktableworkq workq;
    struct pfr_ktable *p;
    struct pf_ruleset *rs;
    int xdel = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
    rs = pf_find_ruleset(trs->pfrt_anchor);
    if (rs == NULL || !rs->topen || ticket != rs->tticket) {
        return 0;
    }
    SLIST_INIT(&workq);
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
            pfr_skip_table(trs, p, 0)) {
            continue;
        }
        p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        xdel++;
    }
    if (!(flags & PFR_FLAG_DUMMY)) {
        pfr_setflags_ktables(&workq);
        rs->topen = 0;
        pf_remove_if_empty_ruleset(rs);
    }
    if (ndel != NULL) {
        *ndel = xdel;
    }
    return 0;
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
    struct pfr_ktable *p, *q;
    struct pfr_ktableworkq workq;
    struct pf_ruleset *rs;
    int xadd = 0, xchange = 0;
    u_int64_t tzero = pf_calendar_time_second();

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
    rs = pf_find_ruleset(trs->pfrt_anchor);
    if (rs == NULL || !rs->topen || ticket != rs->tticket) {
        return EBUSY;
    }

    SLIST_INIT(&workq);
    RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
        if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
            pfr_skip_table(trs, p, 0)) {
            continue;
        }
        SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
        if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) {
            xchange++;
        } else {
            xadd++;
        }
    }

    if (!(flags & PFR_FLAG_DUMMY)) {
        for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
            q = SLIST_NEXT(p, pfrkt_workq);
            pfr_commit_ktable(p, tzero);
        }
        rs->topen = 0;
        pf_remove_if_empty_ruleset(rs);
    }
    if (nadd != NULL) {
        *nadd = xadd;
    }
    if (nchange != NULL) {
        *nchange = xchange;
    }

    return 0;
}

static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
    struct pfr_ktable *shadow = kt->pfrkt_shadow;
    int nflags;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (shadow->pfrkt_cnt == NO_ADDRESSES) {
        if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
            pfr_clstats_ktable(kt, tzero, 1);
        }
    } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
        /* kt might contain addresses */
        struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
        struct pfr_kentry *p, *q, *next;
        struct pfr_addr ad;

        pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
        pfr_mark_addrs(kt);
        SLIST_INIT(&addq);
        SLIST_INIT(&changeq);
        SLIST_INIT(&delq);
        SLIST_INIT(&garbageq);
        pfr_clean_node_mask(shadow, &addrq);
        for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
            next = SLIST_NEXT(p, pfrke_workq); /* XXX */
            pfr_copyout_addr(&ad, p);
            q = pfr_lookup_addr(kt, &ad, 1);
            if (q != NULL) {
                if (q->pfrke_not != p->pfrke_not) {
                    SLIST_INSERT_HEAD(&changeq, q,
                        pfrke_workq);
                }
                q->pfrke_mark = 1;
                SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
            } else {
                p->pfrke_tzero = tzero;
                SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
            }
        }
        pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
        pfr_insert_kentries(kt, &addq, tzero);
        pfr_remove_kentries(kt, &delq);
        pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
        pfr_destroy_kentries(&garbageq);
    } else {
        /* kt cannot contain addresses */
        SWAP(struct radix_node_head *, kt->pfrkt_ip4,
            shadow->pfrkt_ip4);
        SWAP(struct radix_node_head *, kt->pfrkt_ip6,
            shadow->pfrkt_ip6);
        SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
        pfr_clstats_ktable(kt, tzero, 1);
    }
    nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
        (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
        ~PFR_TFLAG_INACTIVE;
    pfr_destroy_ktable(shadow, 0);
    kt->pfrkt_shadow = NULL;
    pfr_setflags_ktable(kt, nflags);
}

void
pfr_table_copyin_cleanup(struct pfr_table *tbl)
{
    tbl->pfrt_anchor[sizeof(tbl->pfrt_anchor) - 1] = '\0';
    tbl->pfrt_name[sizeof(tbl->pfrt_name) - 1] = '\0';
}

static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
    int i;

    if (!tbl->pfrt_name[0]) {
        return -1;
    }
    if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) {
        return -1;
    }
    if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) {
        return -1;
    }
    for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) {
        if (tbl->pfrt_name[i]) {
            return -1;
        }
    }
    if (pfr_fix_anchor(tbl->pfrt_anchor)) {
        return -1;
    }
    if (tbl->pfrt_flags & ~allowedflags) {
        return -1;
    }
    return 0;
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
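/* e.g. an anchor of "///foo/bar" is rewritten in place to "foo/bar" */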
1958 static int
1959 pfr_fix_anchor(char *anchor)
1960 {
1961 size_t siz = MAXPATHLEN;
1962 int i;
1963
1964 if (anchor[0] == '/') {
1965 char *path;
1966 int off;
1967
1968 path = anchor;
1969 off = 1;
1970 while (*++path == '/') {
1971 off++;
1972 }
1973 bcopy(path, anchor, siz - off);
1974 memset(anchor + siz - off, 0, off);
1975 }
1976 if (anchor[siz - 1]) {
1977 return -1;
1978 }
1979 for (i = strlen(anchor); i < (int)siz; i++) {
1980 if (anchor[i]) {
1981 return -1;
1982 }
1983 }
1984 return 0;
1985 }
1986
1987 static int
1988 pfr_table_count(struct pfr_table *filter, int flags)
1989 {
1990 struct pf_ruleset *rs;
1991
1992 if (flags & PFR_FLAG_ALLRSETS) {
1993 return pfr_ktable_cnt;
1994 }
1995 if (filter->pfrt_anchor[0]) {
1996 rs = pf_find_ruleset(filter->pfrt_anchor);
1997 return (rs != NULL) ? rs->tables : -1;
1998 }
1999 return pf_main_ruleset.tables;
2000 }
2001
2002 static int
2003 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
2004 {
2005 if (flags & PFR_FLAG_ALLRSETS) {
2006 return 0;
2007 }
2008 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) {
2009 return 1;
2010 }
2011 return 0;
2012 }
2013
2014 static void
2015 pfr_insert_ktables(struct pfr_ktableworkq *workq)
2016 {
2017 struct pfr_ktable *p;
2018
2019 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2020
2021 SLIST_FOREACH(p, workq, pfrkt_workq)
2022 pfr_insert_ktable(p);
2023 }
2024
2025 static void
2026 pfr_insert_ktable(struct pfr_ktable *kt)
2027 {
2028 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2029
2030 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
2031 pfr_ktable_cnt++;
2032 if (kt->pfrkt_root != NULL) {
2033 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) {
2034 pfr_setflags_ktable(kt->pfrkt_root,
2035 kt->pfrkt_root->pfrkt_flags | PFR_TFLAG_REFDANCHOR);
2036 }
2037 }
2038 }
2039
2040 static void
2041 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2042 {
2043 struct pfr_ktable *p, *q;
2044
2045 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2046
2047 for (p = SLIST_FIRST(workq); p; p = q) {
2048 q = SLIST_NEXT(p, pfrkt_workq);
2049 pfr_setflags_ktable(p, p->pfrkt_nflags);
2050 }
2051 }
2052
2053 static void
2054 pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
2055 {
2056 struct pfr_kentryworkq addrq;
2057
2058 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2059
2060 if (!(newf & PFR_TFLAG_REFERENCED) &&
2061 !(newf & PFR_TFLAG_PERSIST)) {
2062 newf &= ~PFR_TFLAG_ACTIVE;
2063 }
2064 if (!(newf & PFR_TFLAG_ACTIVE)) {
2065 newf &= ~PFR_TFLAG_USRMASK;
2066 }
2067 if (!(newf & PFR_TFLAG_SETMASK)) {
2068 RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
2069 if (kt->pfrkt_root != NULL) {
2070 if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) {
2071 pfr_setflags_ktable(kt->pfrkt_root,
2072 kt->pfrkt_root->pfrkt_flags &
2073 ~PFR_TFLAG_REFDANCHOR);
2074 }
2075 }
2076 pfr_destroy_ktable(kt, 1);
2077 pfr_ktable_cnt--;
2078 return;
2079 }
2080 if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
2081 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2082 pfr_remove_kentries(kt, &addrq);
2083 }
2084 if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
2085 pfr_destroy_ktable(kt->pfrkt_shadow, 1);
2086 kt->pfrkt_shadow = NULL;
2087 }
2088 kt->pfrkt_flags = newf;
2089 }
2090
2091 static void
2092 pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
2093 {
2094 struct pfr_ktable *p;
2095
2096 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2097
2098 SLIST_FOREACH(p, workq, pfrkt_workq)
2099 pfr_clstats_ktable(p, tzero, recurse);
2100 }
2101
2102 static void
2103 pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
2104 {
2105 struct pfr_kentryworkq addrq;
2106
2107 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2108
2109 if (recurse) {
2110 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2111 pfr_clstats_kentries(&addrq, tzero, 0);
2112 }
2113 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
2114 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
2115 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
2116 kt->pfrkt_tzero = tzero;
2117 }
2118
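/*
 * Allocate and initialize a fresh ktable from the user-visible
 * pfr_table, optionally attaching it to (or creating) the ruleset
 * named by its anchor, and set up one radix head each for IPv4 and
 * IPv6. Returns NULL if the pool, ruleset, or radix allocation
 * fails.
 */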
2119 static struct pfr_ktable *
2120 pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
2121 {
2122 struct pfr_ktable *kt;
2123 struct pf_ruleset *rs;
2124
2125 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2126
2127 kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
2128 if (kt == NULL) {
2129 return NULL;
2130 }
2131 bzero(kt, sizeof(*kt));
2132 kt->pfrkt_t = *tbl;
2133
2134 if (attachruleset) {
2135 rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
2136 if (!rs) {
2137 pfr_destroy_ktable(kt, 0);
2138 return NULL;
2139 }
2140 kt->pfrkt_rs = rs;
2141 rs->tables++;
2142 }
2143
2144 if (!rn_inithead((void **)&kt->pfrkt_ip4,
2145 offsetof(struct sockaddr_in, sin_addr) * 8) ||
2146 !rn_inithead((void **)&kt->pfrkt_ip6,
2147 offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
2148 pfr_destroy_ktable(kt, 0);
2149 return NULL;
2150 }
2151 kt->pfrkt_tzero = tzero;
2152
2153 return kt;
2154 }
2155
2156 static void
2157 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2158 {
2159 struct pfr_ktable *p, *q;
2160
2161 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2162
2163 for (p = SLIST_FIRST(workq); p; p = q) {
2164 q = SLIST_NEXT(p, pfrkt_workq);
2165 pfr_destroy_ktable(p, flushaddr);
2166 }
2167 }
2168
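/*
 * Tear down one ktable: optionally flush and free its address
 * entries, release both radix heads, recursively destroy any
 * shadow table, drop the owning ruleset's table count, and return
 * the ktable to its pool.
 */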
2169 static void
2170 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
2171 {
2172 struct pfr_kentryworkq addrq;
2173
2174 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2175
2176 if (flushaddr) {
2177 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2178 pfr_clean_node_mask(kt, &addrq);
2179 pfr_destroy_kentries(&addrq);
2180 }
2181 if (kt->pfrkt_ip4 != NULL) {
2182 _FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
2183 }
2184 if (kt->pfrkt_ip6 != NULL) {
2185 _FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
2186 }
2187 if (kt->pfrkt_shadow != NULL) {
2188 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
2189 }
2190 if (kt->pfrkt_rs != NULL) {
2191 kt->pfrkt_rs->tables--;
2192 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
2193 }
2194 pool_put(&pfr_ktable_pl, kt);
2195 }
2196
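/*
 * Ordering predicate for the pfr_ktables red-black tree: compare
 * by table name first, then by anchor path.
 */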
2197 static int
2198 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2199 {
2200 int d;
2201
2202 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) {
2203 return d;
2204 }
2205 return strcmp(p->pfrkt_anchor, q->pfrkt_anchor);
2206 }
2207
2208 static struct pfr_ktable *
2209 pfr_lookup_table(struct pfr_table *tbl)
2210 {
2211 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2212
2213 /* a struct pfr_ktable starts with a struct pfr_table */
2214 return RB_FIND(pfr_ktablehead, &pfr_ktables,
2215 (struct pfr_ktable *)(void *)tbl);
2216 }
2217
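/*
 * Test whether address 'a' is matched by the table, honoring
 * negated entries and falling back to the root table when this
 * one is inactive. The table's match/nomatch counters are updated
 * and the result returned as nonzero/zero.
 *
 * Minimal usage sketch (hypothetical caller; 'kt' and 'addr' are
 * assumed to be valid and pf_lock to be held):
 *
 *	if (pfr_match_addr(kt, addr, AF_INET))
 *		... addr is covered by a non-negated entry ...
 */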
2218 int
2219 pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2220 {
2221 struct pfr_kentry *ke = NULL;
2222 int match;
2223
2224 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2225
2226 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
2227 kt = kt->pfrkt_root;
2228 }
2229 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
2230 return 0;
2231 }
2232
2233 switch (af) {
2234 #if INET
2235 case AF_INET:
2236 pfr_sin.sin_addr.s_addr = a->addr32[0];
2237 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2238 if (ke && KENTRY_RNF_ROOT(ke)) {
2239 ke = NULL;
2240 }
2241 break;
2242 #endif /* INET */
2243 #if INET6
2244 case AF_INET6:
2245 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2246 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2247 if (ke && KENTRY_RNF_ROOT(ke)) {
2248 ke = NULL;
2249 }
2250 break;
2251 #endif /* INET6 */
2252 }
2253 match = (ke && !ke->pfrke_not);
2254 if (match) {
2255 kt->pfrkt_match++;
2256 } else {
2257 kt->pfrkt_nomatch++;
2258 }
2259 return match;
2260 }
2261
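/*
 * Account one packet and its length against the table and, when
 * the lookup agrees with the rule's expectation ('notrule'),
 * against the matching entry as well; disagreements are tallied
 * under PFR_OP_XPASS instead of the rule's own operation.
 */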
2262 void
2263 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
2264 u_int64_t len, int dir_out, int op_pass, int notrule)
2265 {
2266 struct pfr_kentry *ke = NULL;
2267
2268 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2269
2270 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
2271 kt = kt->pfrkt_root;
2272 }
2273 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
2274 return;
2275 }
2276
2277 switch (af) {
2278 #if INET
2279 case AF_INET:
2280 pfr_sin.sin_addr.s_addr = a->addr32[0];
2281 ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
2282 if (ke && KENTRY_RNF_ROOT(ke)) {
2283 ke = NULL;
2284 }
2285 break;
2286 #endif /* INET */
2287 #if INET6
2288 case AF_INET6:
2289 bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
2290 ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
2291 if (ke && KENTRY_RNF_ROOT(ke)) {
2292 ke = NULL;
2293 }
2294 break;
2295 #endif /* INET6 */
2296 default:
2297 ;
2298 }
2299 if ((ke == NULL || ke->pfrke_not) != notrule) {
2300 if (op_pass != PFR_OP_PASS) {
2301 printf("pfr_update_stats: assertion failed.\n");
2302 }
2303 op_pass = PFR_OP_XPASS;
2304 }
2305 kt->pfrkt_packets[dir_out][op_pass]++;
2306 kt->pfrkt_bytes[dir_out][op_pass] += len;
2307 if (ke != NULL && op_pass != PFR_OP_XPASS) {
2308 ke->pfrke_packets[dir_out][op_pass]++;
2309 ke->pfrke_bytes[dir_out][op_pass] += len;
2310 }
2311 }
2312
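/*
 * Find or create the table a rule refers to, creating the
 * anchor's root table as well when needed, and take a rule
 * reference on it. The first rule reference marks the table
 * PFR_TFLAG_REFERENCED.
 */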
2313 struct pfr_ktable *
2314 pfr_attach_table(struct pf_ruleset *rs, char *name)
2315 {
2316 struct pfr_ktable *kt, *rt;
2317 struct pfr_table tbl;
2318 struct pf_anchor *ac = rs->anchor;
2319
2320 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2321
2322 bzero(&tbl, sizeof(tbl));
2323 strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
2324 if (ac != NULL) {
2325 strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
2326 }
2327 kt = pfr_lookup_table(&tbl);
2328 if (kt == NULL) {
2329 kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
2330 if (kt == NULL) {
2331 return NULL;
2332 }
2333 if (ac != NULL) {
2334 bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
2335 rt = pfr_lookup_table(&tbl);
2336 if (rt == NULL) {
2337 rt = pfr_create_ktable(&tbl, 0, 1);
2338 if (rt == NULL) {
2339 pfr_destroy_ktable(kt, 0);
2340 return NULL;
2341 }
2342 pfr_insert_ktable(rt);
2343 }
2344 kt->pfrkt_root = rt;
2345 }
2346 pfr_insert_ktable(kt);
2347 }
2348 if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) {
2349 pfr_setflags_ktable(kt, kt->pfrkt_flags | PFR_TFLAG_REFERENCED);
2350 }
2351 return kt;
2352 }
2353
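/*
 * Drop a rule reference on the table; when the last one goes
 * away, PFR_TFLAG_REFERENCED is cleared so the table can be
 * reclaimed.
 */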
2354 void
2355 pfr_detach_table(struct pfr_ktable *kt)
2356 {
2357 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2358
2359 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) {
2360 printf("pfr_detach_table: refcount = %d.\n",
2361 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2362 } else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) {
2363 pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED);
2364 }
2365 }
2366
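/*
 * Round-robin address selection from a table-backed pool. '*pidx'
 * is the caller's position in the table and 'counter' the
 * previously returned address, used to resume inside a block.
 * Nested (more specific) blocks are stepped over so that only the
 * enclosing block's own addresses are handed out. Returns 0 with
 * '*raddr'/'*rmask' set and '*pidx'/'counter' advanced, 1 when
 * the table is exhausted, or -1 on error.
 */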
2367 int
2368 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2369 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2370 {
2371 struct pfr_kentry *ke, *ke2;
2372 struct pf_addr *addr;
2373 union sockaddr_union mask;
2374 int idx = -1, use_counter = 0;
2375
2376 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2377
2378 if (af == AF_INET) {
2379 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2380 } else if (af == AF_INET6) {
2381 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2382 } else {
2383 return -1;
2384 }
2385
2386 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
2387 kt = kt->pfrkt_root;
2388 }
2389 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
2390 return -1;
2391 }
2392
2393 if (pidx != NULL) {
2394 idx = *pidx;
2395 }
2396 if (counter != NULL && idx >= 0) {
2397 use_counter = 1;
2398 }
2399 if (idx < 0) {
2400 idx = 0;
2401 }
2402
2403 _next_block:
2404 ke = pfr_kentry_byidx(kt, idx, af);
2405 if (ke == NULL) {
2406 kt->pfrkt_nomatch++;
2407 return 1;
2408 }
2409 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2410 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2411 *rmask = SUNION2PF(&pfr_mask, af);
2412
2413 if (use_counter) {
2414 /* is supplied address within block? */
2415 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2416 /* no, go to next block in table */
2417 idx++;
2418 use_counter = 0;
2419 goto _next_block;
2420 }
2421 PF_ACPY(addr, counter, af);
2422 } else {
2423 /* use first address of block */
2424 PF_ACPY(addr, *raddr, af);
2425 }
2426
2427 if (!KENTRY_NETWORK(ke)) {
2428 /* this is a single IP address - no possible nested block */
2429 PF_ACPY(counter, addr, af);
2430 *pidx = idx;
2431 kt->pfrkt_match++;
2432 return 0;
2433 }
2434 for (;;) {
2435 /* we don't want to use a nested block */
2436 if (af == AF_INET) {
2437 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2438 kt->pfrkt_ip4);
2439 } else if (af == AF_INET6) {
2440 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2441 kt->pfrkt_ip6);
2442 } else {
2443 return -1; /* never happens */
2444 }
2445 /* no need to check KENTRY_RNF_ROOT() here */
2446 if (ke2 == ke) {
2447 /* lookup returned the same block - perfect */
2448 PF_ACPY(counter, addr, af);
2449 *pidx = idx;
2450 kt->pfrkt_match++;
2451 return 0;
2452 }
2453
2454 /* we need to increase the counter past the nested block */
2455 pfr_prepare_network(&mask, af, ke2->pfrke_net);
2456 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2457 PF_AINC(addr, af);
2458 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2459 /* ok, we reached the end of our main block */
2460 /* go to next block in table */
2461 idx++;
2462 use_counter = 0;
2463 goto _next_block;
2464 }
2465 }
2466 }
2467
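/*
 * Return the idx'th entry of the given address family by walking
 * the corresponding radix tree, or NULL if the family is
 * unsupported or idx is out of range.
 */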
2468 static struct pfr_kentry *
2469 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2470 {
2471 struct pfr_walktree w;
2472
2473 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2474
2475 bzero(&w, sizeof(w));
2476 w.pfrw_op = PFRW_POOL_GET;
2477 w.pfrw_cnt = idx;
2478
2479 switch (af) {
2480 #if INET
2481 case AF_INET:
2482 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2483 pfr_walktree, &w);
2484 return w.pfrw_kentry;
2485 #endif /* INET */
2486 #if INET6
2487 case AF_INET6:
2488 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2489 pfr_walktree, &w);
2490 return w.pfrw_kentry;
2491 #endif /* INET6 */
2492 default:
2493 return NULL;
2494 }
2495 }
2496
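/*
 * Refresh a pfi_dynaddr descriptor from the table's contents:
 * reset the per-family address counts, then walk the v4 and/or v6
 * trees (as constrained by dyn->pfid_af) so that pfr_walktree can
 * recompute the counts and the derived address/mask.
 */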
2497 void
2498 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2499 {
2500 struct pfr_walktree w;
2501
2502 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
2503
2504 bzero(&w, sizeof(w));
2505 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2506 w.pfrw_dyn = dyn;
2507
2508 dyn->pfid_acnt4 = 0;
2509 dyn->pfid_acnt6 = 0;
2510 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) {
2511 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2512 pfr_walktree, &w);
2513 }
2514 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) {
2515 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2516 pfr_walktree, &w);
2517 }
2518 }