/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>

#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

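/*
 * COPYIN/COPYOUT pick the transfer primitive from the flags: requests
 * arriving via the ioctl path (PFR_FLAG_USERIOCTL) cross the user/kernel
 * boundary with copyin()/copyout(), while kernel-internal callers pass
 * kernel pointers and get a plain bcopy() instead (which cannot fail,
 * hence the ", 0").
 */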
#define COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin(CAST_USER_ADDR_T(from), (to), (size)) :	\
	(bcopy((from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), CAST_USER_ADDR_T(to), (size)) :	\
	(bcopy((from), (to), (size)), 0))

#define FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof (sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define FILLIN_SIN6(sin6, addr)			\
	do {					\
		(sin6).sin6_len = sizeof (sin6);	\
		(sin6).sin6_family = AF_INET6;	\
		(sin6).sin6_addr = (addr);	\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af) == AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke)	\
	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr *pfrw1_addr;
		struct pfr_astats *pfrw1_astats;
		struct pfr_kentryworkq *pfrw1_workq;
		struct pfr_kentry *pfrw1_kentry;
		struct pfi_dynaddr *pfrw1_dyn;
	} pfrw_1;
	int pfrw_free;
	int pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool pfr_ktable_pl;
struct pool pfr_kentry_pl;

static struct pool pfr_kentry_pl2;
static struct sockaddr_in pfr_sin;
static struct sockaddr_in6 pfr_sin6;
static union sockaddr_union pfr_mask;
static struct pf_addr pfr_ffaddr;

static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(struct pfr_addr *, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead pfr_ktables;
static struct pfr_table pfr_nulltable;
static int pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof (pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof (pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

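#if 0
/*
 * Usage sketch (illustrative only, following the #if 0 convention
 * above; not compiled into the kernel): a kernel-side caller adding
 * 192.0.2.0/24 to an already-created, active table.  The table name
 * "badhosts" is hypothetical.  With no PFR_FLAG_USERIOCTL in the
 * flags, COPYIN/COPYOUT degrade to bcopy(), so plain kernel pointers
 * are fine; pf_lock must be held across the call.
 */
static int
pfr_example_add_one(void)
{
	struct pfr_table tbl;
	struct pfr_addr ad;
	int nadd = 0;

	bzero(&tbl, sizeof (tbl));
	strlcpy(tbl.pfrt_name, "badhosts", sizeof (tbl.pfrt_name));

	bzero(&ad, sizeof (ad));
	ad.pfra_af = AF_INET;
	ad.pfra_net = 24;
	ad.pfra_ip4addr.s_addr = htonl(0xc0000200);	/* 192.0.2.0 */

	return (pfr_add_addrs(&tbl, &ad, 1, &nadd, 0));
}
#endif
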
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0;
	u_int64_t tzero = pf_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *	n: number of addresses to delete
	 *	N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
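	/*
	 * Illustration (approximate, integer arithmetic): 'log' below
	 * ends up roughly log2(N) + 2, so for a table of N = 65536
	 * entries log == 18 and the O(N) full-table mark is chosen once
	 * more than 65536/18 (about 3640) deletions are requested;
	 * smaller batches use one radix lookup per address instead.
	 */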
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			if (COPYIN(addr+i, &ad, sizeof (ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq addq, delq, changeq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t tzero = pf_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof (ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	int rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	struct pfr_kentryworkq workq;
	int rv;
	u_int64_t tzero = pf_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}

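/*
 * Sanity-check a pfr_addr from the caller: a known address family, a
 * prefix length no longer than the family allows, no address bits set
 * past the prefix (the partially covered byte is tested with the mask
 * 0xFF >> (net % 8) and every later byte of pfra_u must be zero --
 * indexing from 'ad' works because pfra_u is its first member -- so,
 * for example, 10.1.2.3/24 is rejected while 10.1.2.0/24 passes), and
 * clean pfra_not/pfra_fback fields.
 */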
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree w;

	SLIST_INIT(workq);
	bzero(&w, sizeof (w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree w;

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union sa, mask;
	struct radix_node_head *head;
	struct pfr_kentry *ke;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof (sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else
		return (NULL);
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry *ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof (*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;
	int n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry *p;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}

void
pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
{
	struct pfr_addr ad;
	int i;

	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
			break;
	}
}

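/*
 * Build a sockaddr-style netmask from a prefix length: for example,
 * (AF_INET, 24) yields 255.255.255.0, and (AF_INET6, 68) yields a mask
 * whose first two 32-bit words are all-ones, whose third word is
 * 0xf0000000, and whose fourth word stays zero from the initial
 * bzero().
 */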
void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int i;

	bzero(sa, sizeof (*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof (sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof (sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof (*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry *ke = (struct pfr_kentry *)rn;
	struct pfr_walktree *w = arg;
	int flags = w->pfrw_flags;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad,
			    CAST_USER_ADDR_T(w->pfrw_addr),
			    sizeof (ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof (as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof (as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	int xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0)
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq addq, changeq;
	struct pfr_ktable *p, *q, *r, key;
	int i, rv, xadd = 0;
	u_int64_t tzero = pf_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	int n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof (*tbl), flags))
			return (EFAULT);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	struct pfr_ktableworkq workq;
	int n, nn;
	u_int64_t tzero = pf_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof (*tbl), flags)) {
			return (EFAULT);
		}
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, key;
	int i, xzero = 0;
	u_int64_t tzero = pf_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xchange = 0, xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *kt, *rt, *shadow, key;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	struct pf_ruleset *rs;
	int i, rv, xadd = 0, xaddr = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof (key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof (key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		if (COPYIN(addr+i, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable *p, *q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *rs;
	int xadd = 0, xchange = 0;
	u_int64_t tzero = pf_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable *shadow = kt->pfrkt_shadow;
	int nflags;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0)
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
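/*
 * Only leading slashes are stripped: for instance, the anchor
 * "///foo/bar" is rewritten in place to "foo/bar" (the vacated tail of
 * the buffer is zero-filled), after which the usual NUL-termination
 * and padding checks apply to the whole MAXPATHLEN buffer.
 */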
int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < (int)siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p, *q;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq addrq;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
	struct pfr_ktable *p;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_ruleset *rs;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof (*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

1935
1936 void
1937 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
1938 {
1939 struct pfr_ktable *p, *q;
1940
1941 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1942
1943 for (p = SLIST_FIRST(workq); p; p = q) {
1944 q = SLIST_NEXT(p, pfrkt_workq);
1945 pfr_destroy_ktable(p, flushaddr);
1946 }
1947 }
1948
1949 void
1950 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
1951 {
1952 struct pfr_kentryworkq addrq;
1953
1954 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1955
1956 if (flushaddr) {
1957 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1958 pfr_clean_node_mask(kt, &addrq);
1959 pfr_destroy_kentries(&addrq);
1960 }
1961 if (kt->pfrkt_ip4 != NULL)
1962 _FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
1963 if (kt->pfrkt_ip6 != NULL)
1964 _FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
1965 if (kt->pfrkt_shadow != NULL)
1966 pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
1967 if (kt->pfrkt_rs != NULL) {
1968 kt->pfrkt_rs->tables--;
1969 pf_remove_if_empty_ruleset(kt->pfrkt_rs);
1970 }
1971 pool_put(&pfr_ktable_pl, kt);
1972 }
1973
int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    /* a struct pfr_ktable starts like a struct pfr_table, so the
     * cast below is safe */
    return (RB_FIND(pfr_ktablehead, &pfr_ktables,
        (struct pfr_ktable *)tbl));
}

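/*
 * Main per-packet lookup: return 1 if address "a" is covered by the
 * table, 0 otherwise. An inactive table defers to its root table;
 * negated ("not") entries never match. The table's match/nomatch
 * counters are updated on every call.
 */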
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
    struct pfr_kentry *ke = NULL;
    int match;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
        kt = kt->pfrkt_root;
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
        return (0);

    switch (af) {
#if INET
    case AF_INET:
        pfr_sin.sin_addr.s_addr = a->addr32[0];
        ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
#endif /* INET */
#if INET6
    case AF_INET6:
        bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
        ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
#endif /* INET6 */
    }
    match = (ke && !ke->pfrke_not);
    if (match)
        kt->pfrkt_match++;
    else
        kt->pfrkt_nomatch++;
    return (match);
}

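/*
 * Account one packet of "len" bytes against the table and the matching
 * entry. If the lookup disagrees with the outcome the rule expected
 * ("notrule"), the traffic is booked under PFR_OP_XPASS instead, so
 * the discrepancy stays visible in the statistics.
 */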
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
    struct pfr_kentry *ke = NULL;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
        kt = kt->pfrkt_root;
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
        return;

    switch (af) {
#if INET
    case AF_INET:
        pfr_sin.sin_addr.s_addr = a->addr32[0];
        ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
#endif /* INET */
#if INET6
    case AF_INET6:
        bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
        ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
        if (ke && KENTRY_RNF_ROOT(ke))
            ke = NULL;
        break;
#endif /* INET6 */
    default:
        ;
    }
    if ((ke == NULL || ke->pfrke_not) != notrule) {
        if (op_pass != PFR_OP_PASS)
            printf("pfr_update_stats: assertion failed.\n");
        op_pass = PFR_OP_XPASS;
    }
    kt->pfrkt_packets[dir_out][op_pass]++;
    kt->pfrkt_bytes[dir_out][op_pass] += len;
    if (ke != NULL && op_pass != PFR_OP_XPASS) {
        ke->pfrke_packets[dir_out][op_pass]++;
        ke->pfrke_bytes[dir_out][op_pass] += len;
    }
}

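/*
 * Find or create the table "name" for a rule in ruleset "rs" and take
 * a rule reference on it. For a table inside an anchor, a top-level
 * root table of the same name is found or created and linked through
 * pfrkt_root, so lookups can fall back to it while the anchored table
 * is inactive.
 */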
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
    struct pfr_ktable *kt, *rt;
    struct pfr_table tbl;
    struct pf_anchor *ac = rs->anchor;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&tbl, sizeof (tbl));
    strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
    if (ac != NULL)
        strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
    kt = pfr_lookup_table(&tbl);
    if (kt == NULL) {
        kt = pfr_create_ktable(&tbl, pf_time_second(), 1);
        if (kt == NULL)
            return (NULL);
        if (ac != NULL) {
            /* find or create the root table at the top level */
            bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor));
            rt = pfr_lookup_table(&tbl);
            if (rt == NULL) {
                rt = pfr_create_ktable(&tbl, 0, 1);
                if (rt == NULL) {
                    pfr_destroy_ktable(kt, 0);
                    return (NULL);
                }
                pfr_insert_ktable(rt);
            }
            kt->pfrkt_root = rt;
        }
        pfr_insert_ktable(kt);
    }
    if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
        pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
    return (kt);
}

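/*
 * Drop a rule reference on the table; when the last one goes away,
 * clear PFR_TFLAG_REFERENCED, which may let pfr_setflags_ktable()
 * destroy the table.
 */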
void
pfr_detach_table(struct pfr_ktable *kt)
{
    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
        printf("pfr_detach_table: refcount = %d.\n",
            kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
    else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
        pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

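/*
 * Select an address from the table for an address pool: "*pidx" is the
 * index of the current block and "counter" holds the address last
 * handed out. The block's address and mask are returned through
 * *raddr/*rmask and the chosen address is written into the static
 * pfr_sin/pfr_sin6 sockaddrs. Nested blocks are skipped by stepping
 * the counter past them. Returns 0 on success, 1 once the table is
 * exhausted and -1 on error.
 */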
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
    struct pfr_kentry *ke, *ke2;
    struct pf_addr *addr;
    union sockaddr_union mask;
    int idx = -1, use_counter = 0;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (af == AF_INET)
        addr = (struct pf_addr *)&pfr_sin.sin_addr;
    else if (af == AF_INET6)
        addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
    else
        return (-1);

    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
        kt = kt->pfrkt_root;
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
        return (-1);

    if (pidx != NULL)
        idx = *pidx;
    if (counter != NULL && idx >= 0)
        use_counter = 1;
    if (idx < 0)
        idx = 0;

_next_block:
    ke = pfr_kentry_byidx(kt, idx, af);
    if (ke == NULL) {
        kt->pfrkt_nomatch++;
        return (1);
    }
    pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
    *raddr = SUNION2PF(&ke->pfrke_sa, af);
    *rmask = SUNION2PF(&pfr_mask, af);

    if (use_counter) {
        /* is the supplied address within the block? */
        if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
            /* no, go to next block in table */
            idx++;
            use_counter = 0;
            goto _next_block;
        }
        PF_ACPY(addr, counter, af);
    } else {
        /* use first address of block */
        PF_ACPY(addr, *raddr, af);
    }

    if (!KENTRY_NETWORK(ke)) {
        /* this is a single IP address - no possible nested block */
        PF_ACPY(counter, addr, af);
        *pidx = idx;
        kt->pfrkt_match++;
        return (0);
    }
    for (;;) {
        /* we don't want to use a nested block */
        if (af == AF_INET)
            ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
                kt->pfrkt_ip4);
        else if (af == AF_INET6)
            ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
                kt->pfrkt_ip6);
        else
            return (-1); /* never happens */
        /* no need to check KENTRY_RNF_ROOT() here */
        if (ke2 == ke) {
            /* lookup returned the same block - perfect */
            PF_ACPY(counter, addr, af);
            *pidx = idx;
            kt->pfrkt_match++;
            return (0);
        }

        /*
         * We need to increase the counter past the nested block;
         * build the mask in the lookup's address family so that
         * SUNION2PF() below reads an initialized address.
         */
        pfr_prepare_network(&mask, af, ke2->pfrke_net);
        PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
        PF_AINC(addr, af);
        if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
            /* ok, we reached the end of our main block */
            /* go to next block in table */
            idx++;
            use_counter = 0;
            goto _next_block;
        }
    }
}

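/*
 * Return the idx'th entry of the table for the given address family,
 * found by walking the radix tree with a PFRW_POOL_GET walker.
 */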
struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
    struct pfr_walktree w;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&w, sizeof (w));
    w.pfrw_op = PFRW_POOL_GET;
    w.pfrw_cnt = idx;

    switch (af) {
#if INET
    case AF_INET:
        (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
            pfr_walktree, &w);
        return (w.pfrw_kentry);
#endif /* INET */
#if INET6
    case AF_INET6:
        (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w);
        return (w.pfrw_kentry);
#endif /* INET6 */
    default:
        return (NULL);
    }
}

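/*
 * Recompute the state of a dynamic address backed by this table:
 * zero the per-family address counts, then walk both radix trees with
 * a PFRW_DYNADDR_UPDATE walker, which rebuilds them (restricted to
 * dyn->pfid_af when a specific family is set).
 */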
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
    struct pfr_walktree w;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&w, sizeof (w));
    w.pfrw_op = PFRW_DYNADDR_UPDATE;
    w.pfrw_dyn = dyn;

    dyn->pfid_acnt4 = 0;
    dyn->pfid_acnt6 = 0;
    if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
        (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
            pfr_walktree, &w);
    if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
        (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
            pfr_walktree, &w);
}