]> git.saurik.com Git - apple/xnu.git/blame - bsd/net/pf_table.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / net / pf_table.c
CommitLineData
b0d623f7 1/*
f427ee49 2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
b0d623f7
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
30/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */
31
32/*
33 * Copyright (c) 2002 Cedric Berger
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * - Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * - Redistributions in binary form must reproduce the above
43 * copyright notice, this list of conditions and the following
44 * disclaimer in the documentation and/or other materials provided
45 * with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
50 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
51 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
53 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
54 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
55 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
57 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
58 * POSSIBILITY OF SUCH DAMAGE.
59 *
60 */
61
62#include <sys/param.h>
63#include <sys/systm.h>
64#include <sys/socket.h>
65#include <sys/mbuf.h>
66#include <sys/kernel.h>
67#include <sys/malloc.h>
68
69#include <net/if.h>
70#include <net/route.h>
71#include <netinet/in.h>
72#include <net/radix.h>
73#include <net/pfvar.h>
74
0a7de745
A
75#define ACCEPT_FLAGS(flags, oklist) \
76 do { \
77 if ((flags & ~(oklist)) & \
78 PFR_FLAG_ALLMASK) \
79 return (EINVAL); \
b0d623f7
A
80 } while (0)
81
0a7de745
A
82#define COPYIN(from, to, size, flags) \
83 ((flags & PFR_FLAG_USERIOCTL) ? \
84 copyin((from), (to), (size)) : \
d1ecb069 85 (bcopy((void *)(uintptr_t)(from), (to), (size)), 0))
b0d623f7 86
0a7de745
A
87#define COPYOUT(from, to, size, flags) \
88 ((flags & PFR_FLAG_USERIOCTL) ? \
89 copyout((from), (to), (size)) : \
d1ecb069 90 (bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
b0d623f7 91
0a7de745
A
92#define FILLIN_SIN(sin, addr) \
93 do { \
94 (sin).sin_len = sizeof (sin); \
95 (sin).sin_family = AF_INET; \
96 (sin).sin_addr = (addr); \
b0d623f7
A
97 } while (0)
98
0a7de745
A
99#define FILLIN_SIN6(sin6, addr) \
100 do { \
101 (sin6).sin6_len = sizeof (sin6); \
102 (sin6).sin6_family = AF_INET6; \
103 (sin6).sin6_addr = (addr); \
b0d623f7
A
104 } while (0)
105
0a7de745
A
106#define SWAP(type, a1, a2) \
107 do { \
108 type tmp = a1; \
109 a1 = a2; \
110 a2 = tmp; \
b0d623f7
A
111 } while (0)
112
0a7de745
A
113#define SUNION2PF(su, af) (((af) == AF_INET) ? \
114 (struct pf_addr *)&(su)->sin.sin_addr : \
b0d623f7
A
115 (struct pf_addr *)&(su)->sin6.sin6_addr)
116
0a7de745
A
117#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128)
118#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
119#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
b0d623f7 120#define KENTRY_RNF_ROOT(ke) \
0a7de745 121 ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
b0d623f7 122
0a7de745
A
123#define NO_ADDRESSES (-1)
124#define ENQUEUE_UNMARKED_ONLY (1)
125#define INVERT_NEG_FLAG (1)
b0d623f7
A
126
127struct pfr_walktree {
128 enum pfrw_op {
129 PFRW_MARK,
130 PFRW_SWEEP,
131 PFRW_ENQUEUE,
132 PFRW_GET_ADDRS,
133 PFRW_GET_ASTATS,
134 PFRW_POOL_GET,
135 PFRW_DYNADDR_UPDATE
0a7de745 136 } pfrw_op;
b0d623f7 137 union {
0a7de745
A
138 user_addr_t pfrw1_addr;
139 user_addr_t pfrw1_astats;
140 struct pfr_kentryworkq *pfrw1_workq;
141 struct pfr_kentry *pfrw1_kentry;
142 struct pfi_dynaddr *pfrw1_dyn;
143 } pfrw_1;
144 int pfrw_free;
145 int pfrw_flags;
b0d623f7 146};
0a7de745
A
147#define pfrw_addr pfrw_1.pfrw1_addr
148#define pfrw_astats pfrw_1.pfrw1_astats
149#define pfrw_workq pfrw_1.pfrw1_workq
150#define pfrw_kentry pfrw_1.pfrw1_kentry
151#define pfrw_dyn pfrw_1.pfrw1_dyn
152#define pfrw_cnt pfrw_free
b0d623f7 153
0a7de745 154#define senderr(e) do { rv = (e); goto _bad; } while (0)
b0d623f7 155
0a7de745
A
156struct pool pfr_ktable_pl;
157struct pool pfr_kentry_pl;
b0d623f7 158
0a7de745
A
159static struct pool pfr_kentry_pl2;
160static struct sockaddr_in pfr_sin;
161static struct sockaddr_in6 pfr_sin6;
162static union sockaddr_union pfr_mask;
163static struct pf_addr pfr_ffaddr;
b0d623f7
A
164
165static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
166static int pfr_validate_addr(struct pfr_addr *);
167static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
168 int *, int);
169static void pfr_mark_addrs(struct pfr_ktable *);
170static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
171 struct pfr_addr *, int);
f427ee49 172static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, boolean_t);
b0d623f7
A
173static void pfr_destroy_kentries(struct pfr_kentryworkq *);
174static void pfr_destroy_kentry(struct pfr_kentry *);
175static void pfr_insert_kentries(struct pfr_ktable *,
176 struct pfr_kentryworkq *, u_int64_t);
177static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
178static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
d1ecb069 179static void pfr_reset_feedback(user_addr_t, int, int);
b0d623f7
A
180static void pfr_prepare_network(union sockaddr_union *, int, int);
181static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
182static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
183static int pfr_walktree(struct radix_node *, void *);
184static int pfr_validate_table(struct pfr_table *, int, int);
185static int pfr_fix_anchor(char *);
186static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
187static void pfr_insert_ktables(struct pfr_ktableworkq *);
188static void pfr_insert_ktable(struct pfr_ktable *);
189static void pfr_setflags_ktables(struct pfr_ktableworkq *);
190static void pfr_setflags_ktable(struct pfr_ktable *, int);
191static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
192static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
193static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
194static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
195static void pfr_destroy_ktable(struct pfr_ktable *, int);
196static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
197static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
198static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
199static int pfr_table_count(struct pfr_table *, int);
200static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
201static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
202
203RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
204 pfr_ktable_compare);
205RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
206
0a7de745
A
207static struct pfr_ktablehead pfr_ktables;
208static struct pfr_table pfr_nulltable;
209static int pfr_ktable_cnt;
b0d623f7
A
210
/*
 * One-time initialization of the pf table subsystem: sets up the three
 * kentry/ktable pools, the static IPv4/IPv6 sockaddr templates used for
 * radix lookups, and the all-ones address used as a full mask.
 */
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	/* second kentry pool; used by pfr_create_kentry() when intr is set */
	pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	/* prototype sockaddrs reused by lookup/route paths */
	pfr_sin.sin_len = sizeof(pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof(pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	/* all-ones pf_addr, usable as a host mask for either family */
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
228
#if 0
/* Teardown counterpart of pfr_initialize(); compiled out — pf tables
 * live for the lifetime of the kernel, so the pools are never destroyed. */
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif
238
/*
 * Remove every address from table 'tbl'.  On success *ndel receives the
 * number of entries removed.  With PFR_FLAG_DUMMY only the count is
 * computed; nothing is actually deleted.
 * Returns 0, or EINVAL/ESRCH/EPERM on validation, lookup or const-table
 * failure.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		/* table is marked constant; refuse modification */
		return EPERM;
	}
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		/* after removing everything the count must be zero */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return 0;
}
268
/*
 * Add 'size' addresses read from user/kernel buffer '_addr' to table 'tbl'.
 * A scratch table 'tmpkt' is used to detect duplicates within the request
 * itself (q lookup) independently of what is already in 'kt' (p lookup).
 * With PFR_FLAG_FEEDBACK each pfr_addr is copied back annotated with a
 * PFR_FB_* result code; with PFR_FLAG_DUMMY the additions are counted but
 * discarded.  *nadd receives the number of entries (to be) added.
 */
int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0;
	user_addr_t addr = _addr;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/* scratch table for intra-request duplicate detection */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);	/* already in table? */
		q = pfr_lookup_addr(tmpkt, &ad, 1);	/* already in request? */
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else if (p == NULL) {
				ad.pfra_fback = PFR_FB_ADDED;
			} else if (p->pfrke_not != ad.pfra_not) {
				/* same prefix, opposite negate flag */
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else {
				ad.pfra_fback = PFR_FB_NONE;
			}
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL) {
				senderr(ENOMEM);
			}
			/* route into tmpkt so later duplicates are caught */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	/* detach workq entries from the scratch radix tree */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else {
		pfr_destroy_kentries(&workq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	pfr_destroy_ktable(tmpkt, 0);
	return 0;
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK) {
		/* wipe any fback codes already written to the caller */
		pfr_reset_feedback(_addr, size, flags);
	}
	pfr_destroy_ktable(tmpkt, 0);
	return rv;
}
358
/*
 * Delete 'size' addresses listed in '_addr' from table 'tbl'.
 * Picks between two unmark strategies based on cost (see comment below),
 * then marks exactly the entries to delete and removes them in one pass.
 * *ndel receives the number deleted; PFR_FLAG_DUMMY counts only,
 * PFR_FLAG_FEEDBACK annotates each pfr_addr with a PFR_FB_* code.
 */
int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	/* log ~= floor(log2(N)) + 2; used as the per-lookup cost estimate */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1) {
		log++;
	}
	if (size > kt->pfrkt_cnt / log) {
		/* full table scan: clear the mark on every entry */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete, clearing their marks */
		for (i = 0; i < size; i++, addr += sizeof(ad)) {
			if (COPYIN(addr, &ad, sizeof(ad), flags)) {
				return EFAULT;
			}
			if (pfr_validate_addr(&ad)) {
				return EINVAL;
			}
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL) {
				p->pfrke_mark = 0;
			}
		}
	}
	SLIST_INIT(&workq);
	/* second pass: mark + enqueue each entry to delete exactly once */
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL) {
				ad.pfra_fback = PFR_FB_NONE;
			} else if (p->pfrke_not != ad.pfra_not) {
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else if (p->pfrke_mark) {
				/* already queued by an earlier request entry */
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else {
				ad.pfra_fback = PFR_FB_DELETED;
			}
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}
459
460int
d1ecb069 461pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
b0d623f7
A
462 int *size2, int *nadd, int *ndel, int *nchange, int flags,
463 u_int32_t ignore_pfrt_flags)
464{
0a7de745
A
465 struct pfr_ktable *kt, *tmpkt;
466 struct pfr_kentryworkq addq, delq, changeq;
467 struct pfr_kentry *p, *q;
468 struct pfr_addr ad;
469 user_addr_t addr = _addr;
470 int i, rv, xadd = 0, xdel = 0, xchange = 0;
471 u_int64_t tzero = pf_calendar_time_second();
b0d623f7
A
472
473 ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
474 PFR_FLAG_FEEDBACK);
475 if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
0a7de745
A
476 PFR_FLAG_USERIOCTL)) {
477 return EINVAL;
478 }
b0d623f7 479 kt = pfr_lookup_table(tbl);
0a7de745
A
480 if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
481 return ESRCH;
482 }
483 if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
484 return EPERM;
485 }
b0d623f7 486 tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
0a7de745
A
487 if (tmpkt == NULL) {
488 return ENOMEM;
489 }
b0d623f7
A
490 pfr_mark_addrs(kt);
491 SLIST_INIT(&addq);
492 SLIST_INIT(&delq);
493 SLIST_INIT(&changeq);
0a7de745
A
494 for (i = 0; i < size; i++, addr += sizeof(ad)) {
495 if (COPYIN(addr, &ad, sizeof(ad), flags)) {
b0d623f7 496 senderr(EFAULT);
0a7de745
A
497 }
498 if (pfr_validate_addr(&ad)) {
b0d623f7 499 senderr(EINVAL);
0a7de745 500 }
b0d623f7
A
501 ad.pfra_fback = PFR_FB_NONE;
502 p = pfr_lookup_addr(kt, &ad, 1);
503 if (p != NULL) {
504 if (p->pfrke_mark) {
505 ad.pfra_fback = PFR_FB_DUPLICATE;
506 goto _skip;
507 }
508 p->pfrke_mark = 1;
509 if (p->pfrke_not != ad.pfra_not) {
510 SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
511 ad.pfra_fback = PFR_FB_CHANGED;
512 xchange++;
513 }
514 } else {
515 q = pfr_lookup_addr(tmpkt, &ad, 1);
516 if (q != NULL) {
517 ad.pfra_fback = PFR_FB_DUPLICATE;
518 goto _skip;
519 }
520 p = pfr_create_kentry(&ad,
521 !(flags & PFR_FLAG_USERIOCTL));
0a7de745 522 if (p == NULL) {
b0d623f7 523 senderr(ENOMEM);
0a7de745 524 }
b0d623f7
A
525 if (pfr_route_kentry(tmpkt, p)) {
526 pfr_destroy_kentry(p);
527 ad.pfra_fback = PFR_FB_NONE;
528 } else {
529 SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
530 ad.pfra_fback = PFR_FB_ADDED;
531 xadd++;
532 }
533 }
534_skip:
0a7de745
A
535 if (flags & PFR_FLAG_FEEDBACK) {
536 if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
b0d623f7 537 senderr(EFAULT);
0a7de745
A
538 }
539 }
b0d623f7
A
540 }
541 pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
542 if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
0a7de745
A
543 if (*size2 < size + xdel) {
544 *size2 = size + xdel;
b0d623f7
A
545 senderr(0);
546 }
547 i = 0;
d1ecb069 548 addr = _addr + size;
b0d623f7
A
549 SLIST_FOREACH(p, &delq, pfrke_workq) {
550 pfr_copyout_addr(&ad, p);
551 ad.pfra_fback = PFR_FB_DELETED;
0a7de745 552 if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
b0d623f7 553 senderr(EFAULT);
0a7de745
A
554 }
555 addr += sizeof(ad);
b0d623f7
A
556 i++;
557 }
558 }
559 pfr_clean_node_mask(tmpkt, &addq);
560 if (!(flags & PFR_FLAG_DUMMY)) {
561 pfr_insert_kentries(kt, &addq, tzero);
562 pfr_remove_kentries(kt, &delq);
563 pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
0a7de745 564 } else {
b0d623f7 565 pfr_destroy_kentries(&addq);
0a7de745
A
566 }
567 if (nadd != NULL) {
b0d623f7 568 *nadd = xadd;
0a7de745
A
569 }
570 if (ndel != NULL) {
b0d623f7 571 *ndel = xdel;
0a7de745
A
572 }
573 if (nchange != NULL) {
b0d623f7 574 *nchange = xchange;
0a7de745
A
575 }
576 if ((flags & PFR_FLAG_FEEDBACK) && size2) {
577 *size2 = size + xdel;
578 }
b0d623f7 579 pfr_destroy_ktable(tmpkt, 0);
0a7de745 580 return 0;
b0d623f7
A
581_bad:
582 pfr_clean_node_mask(tmpkt, &addq);
583 pfr_destroy_kentries(&addq);
0a7de745 584 if (flags & PFR_FLAG_FEEDBACK) {
d1ecb069 585 pfr_reset_feedback(_addr, size, flags);
0a7de745 586 }
b0d623f7 587 pfr_destroy_ktable(tmpkt, 0);
0a7de745 588 return rv;
b0d623f7
A
589}
590
/*
 * Test which of the 'size' host addresses in 'addr' match table 'tbl'.
 * Each pfr_addr is copied back with pfra_fback set to MATCH / NOTMATCH /
 * NONE; *nmatch receives the count of positive (non-negated) matches.
 * With PFR_FLAG_REPLACE the matched entry's address is also copied back.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}

	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			return EFAULT;
		}
		if (pfr_validate_addr(&ad)) {
			return EINVAL;
		}
		if (ADDR_NETWORK(&ad)) {
			/* only host addresses may be tested */
			return EINVAL;
		}
		/* non-exact lookup: best (longest-prefix) match */
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE) {
			/* NOTE(review): p may be NULL here; assumes
			 * pfr_copyout_addr() tolerates that — confirm. */
			pfr_copyout_addr(&ad, p);
		}
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not) {
			xmatch++;
		}
		if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
			return EFAULT;
		}
	}
	if (nmatch != NULL) {
		*nmatch = xmatch;
	}
	return 0;
}
637
/*
 * Copy all addresses of table 'tbl' out to 'addr'.  On entry *size is the
 * caller's buffer capacity (in entries); if too small, *size is set to the
 * required count and 0 is returned without copying.  On success *size is
 * the number of entries written.
 */
int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	int rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;	/* decremented per entry copied */
	w.pfrw_flags = flags;
	/* walk the IPv4 tree, then the IPv6 tree with the same cursor */
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	if (rv) {
		return rv;
	}

	if (w.pfrw_free) {
		/* tree contained fewer entries than pfrkt_cnt claimed */
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}
681
/*
 * Copy per-address statistics of table 'tbl' out to 'addr' (same *size
 * in/out protocol as pfr_get_addrs).  If PFR_FLAG_CLSTATS is set the
 * statistics are cleared after a successful walk — though that flag is
 * currently not accepted by ACCEPT_FLAGS (see XXX below).
 */
int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	struct pfr_kentryworkq workq;
	int rv;
	u_int64_t tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	/* dead unless PFR_FLAG_CLSTATS is re-enabled above */
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv) {
		return rv;
	}

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}
732
/*
 * Clear the statistics of the 'size' addresses listed in '_addr' within
 * table 'tbl'.  *nzero receives the number of entries cleared.  Honors
 * PFR_FLAG_DUMMY (count only) and PFR_FLAG_FEEDBACK (per-entry
 * PFR_FB_CLEARED / PFR_FB_NONE result codes copied back).
 */
int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);	/* exact match only */
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/* tzero 0: cleared entries get a zero creation timestamp */
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}
788
/*
 * Validate a pfr_addr from userland: family must be INET/INET6, the prefix
 * length must fit the family, all host bits beyond the prefix must be
 * zero, and the 'not'/'fback' fields must be sane.
 * Returns 0 if valid, -1 otherwise.
 */
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32) {
			return -1;
		}
		break;
#endif /* INET */
	case AF_INET6:
		if (ad->pfra_net > 128) {
			return -1;
		}
		break;
	default:
		return -1;
	}
	/*
	 * NOTE(review): the byte indexing below treats 'ad' itself as the
	 * address bytes — this assumes pfra_u is the first member of
	 * struct pfr_addr (offset 0); confirm against pfvar.h.
	 */
	/* the last partially-used byte must have its host bits zero */
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8)))) {
		return -1;
	}
	/* all fully-unused trailing address bytes must be zero */
	for (i = (ad->pfra_net + 7) / 8; i < (int)sizeof(ad->pfra_u); i++) {
		if (((caddr_t)ad)[i]) {
			return -1;
		}
	}
	if (ad->pfra_not && ad->pfra_not != 1) {
		return -1;
	}
	/* callers must pass fback zeroed; it is output-only */
	if (ad->pfra_fback) {
		return -1;
	}
	return 0;
}
827
/*
 * Walk both radix trees of 'kt' and collect entries onto 'workq'.
 * With 'sweep' set (PFRW_SWEEP) only unmarked entries are collected;
 * otherwise (PFRW_ENQUEUE) every entry is.  *naddr, if non-NULL,
 * receives the number of entries enqueued.
 */
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL) {
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
		}
	}
	if (kt->pfrkt_ip6 != NULL) {
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
		}
	}
	if (naddr != NULL) {
		*naddr = w.pfrw_cnt;	/* alias of pfrw_free */
	}
}
854
/*
 * Clear the 'mark' on every entry of 'kt' (PFRW_MARK walk over both
 * address families), preparing for a mark-and-sweep pass.
 * NOTE: unlike pfr_enqueue_addrs() this does not NULL-check the tree
 * heads; callers only pass fully-initialized active tables.
 */
static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) {
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	}
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) {
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
	}
}
869
870
/*
 * Look up 'ad' in table 'kt'.  Network addresses use an exact
 * prefix+mask lookup; host addresses use longest-prefix match, and with
 * 'exact' set a network entry matching a host query is rejected.
 * Returns the kentry or NULL.  Must be called with the pf lock held.
 */
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union sa, mask;
	struct radix_node_head *head;
	struct pfr_kentry *ke;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else {
		return NULL;
	}
	if (ADDR_NETWORK(ad)) {
		/* prefix entry: exact prefix+mask lookup */
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			/* internal radix root nodes are not real entries */
			ke = NULL;
		}
	} else {
		/* host entry: best-match lookup */
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		if (exact && ke && KENTRY_NETWORK(ke)) {
			/* caller wants the host entry itself, not a covering net */
			ke = NULL;
		}
	}
	return ke;
}
907
/*
 * Allocate and initialize a kentry from 'ad'.  'intr' selects the
 * second pool (pfr_kentry_pl2) — used for entries created outside a
 * user ioctl — and is recorded so pfr_destroy_kentry() frees to the
 * matching pool.  Returns NULL if the pool is exhausted.
 */
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, boolean_t intr)
{
	struct pfr_kentry *ke;

	if (intr) {
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	} else {
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	}
	if (ke == NULL) {
		return NULL;
	}
	bzero(ke, sizeof(*ke));

	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	/* remember the source pool for pfr_destroy_kentry() */
	ke->pfrke_intrpool = (u_int8_t)intr;
	return ke;
}
934
d1ecb069 935static void
b0d623f7
A
936pfr_destroy_kentries(struct pfr_kentryworkq *workq)
937{
0a7de745 938 struct pfr_kentry *p, *q;
b0d623f7
A
939
940 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
941 q = SLIST_NEXT(p, pfrke_workq);
942 pfr_destroy_kentry(p);
943 }
944}
945
d1ecb069 946static void
b0d623f7
A
947pfr_destroy_kentry(struct pfr_kentry *ke)
948{
0a7de745 949 if (ke->pfrke_intrpool) {
b0d623f7 950 pool_put(&pfr_kentry_pl2, ke);
0a7de745 951 } else {
b0d623f7 952 pool_put(&pfr_kentry_pl, ke);
0a7de745 953 }
b0d623f7
A
954}
955
/*
 * Route every entry on 'workq' into table 'kt' and stamp it with
 * 'tzero'.  On a routing failure the remaining entries are skipped
 * (loop breaks) and only the successfully-routed count is added to
 * pfrkt_cnt.
 */
static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
975
/*
 * Insert a single address 'ad' into table 'kt' with timestamp 'tzero'.
 * A no-op (returns 0) if an exact entry already exists.  Always
 * allocates from the interrupt pool (TRUE), since this path is not a
 * user ioctl.  Returns 0, EINVAL on allocation failure (historical
 * choice; arguably ENOMEM), or the pfr_route_kentry() error.
 */
int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL) {
		return 0;
	}
	p = pfr_create_kentry(ad, TRUE);
	if (p == NULL) {
		return EINVAL;
	}

	rv = pfr_route_kentry(kt, p);
	if (rv) {
		/* NOTE(review): 'p' is not destroyed on routing failure —
		 * possible leak; confirm pfr_route_kentry() ownership. */
		return rv;
	}

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return 0;
}
1001
d1ecb069 1002static void
b0d623f7
A
1003pfr_remove_kentries(struct pfr_ktable *kt,
1004 struct pfr_kentryworkq *workq)
1005{
0a7de745
A
1006 struct pfr_kentry *p;
1007 int n = 0;
b0d623f7
A
1008
1009 SLIST_FOREACH(p, workq, pfrke_workq) {
1010 pfr_unroute_kentry(kt, p);
1011 n++;
1012 }
1013 kt->pfrkt_cnt -= n;
1014 pfr_destroy_kentries(workq);
1015}
1016
d1ecb069 1017static void
b0d623f7
A
1018pfr_clean_node_mask(struct pfr_ktable *kt,
1019 struct pfr_kentryworkq *workq)
1020{
0a7de745 1021 struct pfr_kentry *p;
b0d623f7
A
1022
1023 SLIST_FOREACH(p, workq, pfrke_workq)
0a7de745 1024 pfr_unroute_kentry(kt, p);
b0d623f7
A
1025}
1026
d1ecb069 1027static void
b0d623f7
A
1028pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
1029 int negchange)
1030{
0a7de745 1031 struct pfr_kentry *p;
b0d623f7 1032
5ba3f43e 1033 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
1034
1035 SLIST_FOREACH(p, workq, pfrke_workq) {
0a7de745 1036 if (negchange) {
b0d623f7 1037 p->pfrke_not = !p->pfrke_not;
0a7de745
A
1038 }
1039 bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
1040 bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
b0d623f7
A
1041 p->pfrke_tzero = tzero;
1042 }
1043}
1044
d1ecb069
A
1045static void
1046pfr_reset_feedback(user_addr_t addr, int size, int flags)
b0d623f7 1047{
0a7de745
A
1048 struct pfr_addr ad;
1049 int i;
b0d623f7 1050
0a7de745
A
1051 for (i = 0; i < size; i++, addr += sizeof(ad)) {
1052 if (COPYIN(addr, &ad, sizeof(ad), flags)) {
b0d623f7 1053 break;
0a7de745 1054 }
b0d623f7 1055 ad.pfra_fback = PFR_FB_NONE;
0a7de745 1056 if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
b0d623f7 1057 break;
0a7de745 1058 }
b0d623f7
A
1059 }
1060}
1061
d1ecb069 1062static void
b0d623f7
A
1063pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1064{
0a7de745 1065 int i;
b0d623f7 1066
0a7de745 1067 bzero(sa, sizeof(*sa));
b0d623f7 1068 if (af == AF_INET) {
0a7de745 1069 sa->sin.sin_len = sizeof(sa->sin);
b0d623f7 1070 sa->sin.sin_family = AF_INET;
0a7de745 1071 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0;
b0d623f7 1072 } else if (af == AF_INET6) {
0a7de745 1073 sa->sin6.sin6_len = sizeof(sa->sin6);
b0d623f7
A
1074 sa->sin6.sin6_family = AF_INET6;
1075 for (i = 0; i < 4; i++) {
1076 if (net <= 32) {
1077 sa->sin6.sin6_addr.s6_addr32[i] =
0a7de745 1078 net ? htonl(-1 << (32 - net)) : 0;
b0d623f7
A
1079 break;
1080 }
1081 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1082 net -= 32;
1083 }
1084 }
1085}
1086
d1ecb069 1087static int
b0d623f7
A
1088pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1089{
0a7de745
A
1090 union sockaddr_union mask;
1091 struct radix_node *rn;
1092 struct radix_node_head *head;
b0d623f7 1093
5ba3f43e 1094 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 1095
0a7de745
A
1096 bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
1097 if (ke->pfrke_af == AF_INET) {
b0d623f7 1098 head = kt->pfrkt_ip4;
0a7de745 1099 } else if (ke->pfrke_af == AF_INET6) {
b0d623f7 1100 head = kt->pfrkt_ip6;
0a7de745
A
1101 } else {
1102 return -1;
1103 }
b0d623f7
A
1104
1105 if (KENTRY_NETWORK(ke)) {
1106 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1107 rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
0a7de745 1108 } else {
b0d623f7 1109 rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
0a7de745 1110 }
b0d623f7 1111
0a7de745 1112 return rn == NULL ? -1 : 0;
b0d623f7
A
1113}
1114
d1ecb069 1115static int
b0d623f7
A
1116pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1117{
0a7de745
A
1118 union sockaddr_union mask;
1119 struct radix_node *rn;
1120 struct radix_node_head *head;
b0d623f7 1121
5ba3f43e 1122 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 1123
0a7de745 1124 if (ke->pfrke_af == AF_INET) {
b0d623f7 1125 head = kt->pfrkt_ip4;
0a7de745 1126 } else if (ke->pfrke_af == AF_INET6) {
b0d623f7 1127 head = kt->pfrkt_ip6;
0a7de745
A
1128 } else {
1129 return -1;
1130 }
b0d623f7
A
1131
1132 if (KENTRY_NETWORK(ke)) {
1133 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1134 rn = rn_delete(&ke->pfrke_sa, &mask, head);
0a7de745 1135 } else {
b0d623f7 1136 rn = rn_delete(&ke->pfrke_sa, NULL, head);
0a7de745 1137 }
b0d623f7
A
1138
1139 if (rn == NULL) {
1140 printf("pfr_unroute_kentry: delete failed.\n");
0a7de745 1141 return -1;
b0d623f7 1142 }
0a7de745 1143 return 0;
b0d623f7
A
1144}
1145
d1ecb069 1146static void
b0d623f7
A
1147pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1148{
0a7de745
A
1149 bzero(ad, sizeof(*ad));
1150 if (ke == NULL) {
b0d623f7 1151 return;
0a7de745 1152 }
b0d623f7
A
1153 ad->pfra_af = ke->pfrke_af;
1154 ad->pfra_net = ke->pfrke_net;
1155 ad->pfra_not = ke->pfrke_not;
0a7de745 1156 if (ad->pfra_af == AF_INET) {
b0d623f7 1157 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
0a7de745 1158 } else if (ad->pfra_af == AF_INET6) {
b0d623f7 1159 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
0a7de745 1160 }
b0d623f7
A
1161}
1162
/*
 * Radix-tree walker callback, dispatched on w->pfrw_op:
 *   PFRW_MARK           - clear the per-entry mark bit.
 *   PFRW_SWEEP          - enqueue entries whose mark is still clear
 *                         (falls through into PFRW_ENQUEUE).
 *   PFRW_ENQUEUE        - collect every entry onto w->pfrw_workq.
 *   PFRW_GET_ADDRS      - copy entries out to user space as pfr_addr.
 *   PFRW_GET_ASTATS     - copy entries out as pfr_astats with counters.
 *   PFRW_POOL_GET       - select the w->pfrw_cnt'th non-negated entry.
 *   PFRW_DYNADDR_UPDATE - publish the first v4/v6 address+mask into a
 *                         dynamic-address descriptor.
 * Returns 0 to continue the walk, non-zero to stop (EFAULT on copyout
 * failure, 1 when PFRW_POOL_GET found its entry).
 */
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry *ke = (struct pfr_kentry *)rn;
	struct pfr_walktree *w = arg;
	int flags = w->pfrw_flags;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark) {
			break;
		}
		OS_FALLTHROUGH;
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* pfrw_free limits output to the user buffer's capacity */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			/* NOTE(review): uses bare copyout() here, while
			 * PFRW_GET_ASTATS uses the COPYOUT() wrapper with
			 * flags — confirm this asymmetry is intentional. */
			if (copyout(&ad, w->pfrw_addr, sizeof(ad))) {
				return EFAULT;
			}
			w->pfrw_addr += sizeof(ad);
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			/* zero first so padding never leaks to user space */
			bzero(&as, sizeof(as));

			pfr_copyout_addr(&as.pfras_a, ke);

			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags)) {
				return EFAULT;
			}
			w->pfrw_astats += sizeof(as);
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not) {
			break; /* negative entries are ignored */
		}
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return 1; /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			/* only the first v4 entry defines addr/mask */
			if (w->pfrw_dyn->pfid_acnt4++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
				&pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			/* only the first v6 entry defines addr/mask */
			if (w->pfrw_dyn->pfid_acnt6++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
				&pfr_mask, AF_INET6);
		}
		break;
	}
	return 0;
}
1249
/*
 * DIOCRCLRTABLES: queue every active table matching "filter" for
 * deactivation (the reserved anchor's tables are exempt).  With
 * PFR_FLAG_DUMMY only the would-be count is computed.  "*ndel" receives
 * the number of tables cleared.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	int xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	if (pfr_table_count(filter, flags) < 0) {
		return ENOENT;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		/* never clear tables belonging to the reserved anchor */
		if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) {
			continue;
		}
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			continue;
		}
		/* clearing ACTIVE lets pfr_setflags_ktable() tear it down */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1291
/*
 * DIOCRADDTABLES: create or (re)activate "size" tables copied in from
 * user space at "tbl".  New anchored tables also get a root table in
 * the main anchor, created on demand.  With PFR_FLAG_DUMMY everything
 * is computed but nothing is committed.  "*nadd" receives the number
 * of tables added or reactivated.  senderr() is presumed to set rv and
 * jump to _bad for unified error cleanup.
 */
int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq addq, changeq;
	struct pfr_ktable *p, *q, *r, key;
	int i, rv, xadd = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			senderr(EFAULT);
		}
		/* force NUL termination on user-supplied strings */
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL)) {
			senderr(EINVAL);
		}
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL) {
				senderr(ENOMEM);
			}
			/* drop duplicates already queued in this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0]) {
				goto _skip;
			}

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* the root may itself be pending on addq */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL) {
				senderr(ENOMEM);
			}
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing but inactive: queue a flag change */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
			if (!pfr_ktable_compare(&key, q)) {
				goto _skip;
			}
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else {
		pfr_destroy_ktables(&addq, 0);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	return 0;
_bad:
	pfr_destroy_ktables(&addq, 0);
	return rv;
}
1380
/*
 * DIOCRDELTABLES: deactivate "size" tables named by the user-space
 * array at "tbl".  Tables not found or already inactive are silently
 * skipped, as are duplicates within the request.  With PFR_FLAG_DUMMY
 * nothing is committed.  "*ndel" receives the number of tables
 * deactivated.
 */
int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip tables already queued by this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
			if (!pfr_ktable_compare(p, q)) {
				goto _skip;
			}
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1423
/*
 * DIOCRGETTABLES: copy the tables matching "filter" out to user space
 * at "tbl".  If the caller's buffer (*size entries) is too small, only
 * the required count is returned in *size and nothing is copied.  A
 * non-zero residue after the walk means the table set changed under us
 * and is reported as ENOTTY.
 */
int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	int n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = n;
		return 0;
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_t, tbl, sizeof(p->pfrkt_t), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_t);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}
1462
/*
 * DIOCRGETTSTATS: copy per-table statistics for tables matching
 * "filter" out to user space at "tbl".  If the caller's buffer is too
 * small, only the required count is returned in *size.  When
 * PFR_FLAG_CLSTATS is set the copied tables' statistics are reset to
 * "tzero" afterwards (the flag is currently not accepted — see the
 * ACCEPT_FLAGS mask and the XXX note).
 */
int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	struct pfr_ktableworkq workq;
	int n, nn;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor)) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		/* buffer too small: report required size, copy nothing */
		*size = n;
		return 0;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_ts, tbl, sizeof(p->pfrkt_ts), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_ts);
		/* remember what we copied in case CLSTATS is requested */
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS) {
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	}
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}
1512
/*
 * DIOCRCLRTSTATS: reset the statistics of "size" tables named by the
 * user-space array at "tbl"; with PFR_FLAG_ADDRSTOO the per-address
 * counters are cleared as well.  Unknown tables are silently skipped.
 * With PFR_FLAG_DUMMY nothing is committed.  "*nzero" receives the
 * number of tables matched.
 */
int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, key;
	int i, xzero = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
}
1548
/*
 * DIOCRSETTFLAGS: set/clear user flags on "size" tables named by the
 * user-space array at "tbl".  "setflag"/"clrflag" must be disjoint and
 * within PFR_TFLAG_USRMASK.  Clearing PERSIST on an unreferenced table
 * counts as a deletion (the flag commit will tear it down).  "*nchange"
 * and "*ndel" receive the respective counts.
 */
int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xchange = 0, xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag)) {
		return EINVAL;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no-op flag change: nothing to queue */
			if (p->pfrkt_nflags == p->pfrkt_flags) {
				goto _skip;
			}
			/* skip tables already queued by this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
			if (!pfr_ktable_compare(p, q)) {
				goto _skip;
			}
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST on an unreferenced table kills it */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) {
				xdel++;
			} else {
				xchange++;
			}
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1609
/*
 * DIOCXBEGIN helper: open a table transaction on the anchor named in
 * "trs".  Any tables left INACTIVE by a previous unfinished transaction
 * are discarded first.  On success the new transaction ticket is
 * returned through "*ticket" and rs->topen is set.  With PFR_FLAG_DUMMY
 * only the cleanup count is computed.  "*ndel" receives the number of
 * stale inactive tables removed.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL) {
			*ticket = ++rs->tticket;
		}
		rs->topen = 1;
	} else {
		/* dummy run: don't leave an empty ruleset behind */
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1649
/*
 * DIOCXBEGIN/DIOCRINADEFINE helper: stage table "tbl" (and optionally
 * "size" addresses from user space at "addr") inside the open
 * transaction identified by "ticket".  The addresses are built into a
 * detached shadow table which is attached to the live table as
 * kt->pfrkt_shadow; the swap into place happens later in
 * pfr_commit_ktable().  With PFR_FLAG_DUMMY everything is built and
 * immediately torn down.  "*nadd"/"*naddr" receive the table/address
 * counts.  senderr() is presumed to set rv and jump to _bad.
 */
int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *kt, *rt, *shadow, key;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	struct pf_ruleset *rs;
	int i, rv, xadd = 0, xaddr = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO)) {
		return EINVAL;
	}
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	/* the transaction must be open and the ticket must match */
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		return EBUSY;
	}
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL) {
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0]) {
			goto _skip;
		}

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) {
		xadd++;
	}
_skip:
	/* detached shadow table collects the staged addresses */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return ENOMEM;
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		/* duplicates within the request are silently dropped */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL) {
			continue;
		}
		p = pfr_create_kentry(&ad, FALSE);
		if (p == NULL) {
			senderr(ENOMEM);
		}
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any shadow left by an earlier define */
		if (kt->pfrkt_shadow != NULL) {
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		}
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		/* NO_ADDRESSES marks "flags-only" defines for the commit */
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (naddr != NULL) {
		*naddr = xaddr;
	}
	return 0;
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return rv;
}
1765
/*
 * DIOCXROLLBACK helper: abort the open transaction matching "ticket"
 * on the anchor named in "trs", discarding every table staged as
 * INACTIVE.  A stale or missing transaction is not an error (returns
 * 0).  "*ndel" receives the number of staged tables discarded.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		return 0;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1801
/*
 * DIOCXCOMMIT helper: commit the open transaction matching "ticket" on
 * the anchor named in "trs".  Every table staged as INACTIVE is swapped
 * into place by pfr_commit_ktable().  "*nadd" counts newly activated
 * tables, "*nchange" tables that were already active and got replaced.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable *p, *q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *rs;
	int xadd = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		return EBUSY;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) {
			xchange++;
		} else {
			xadd++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/* take the next element first: commit may unlink p */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}

	return 0;
}
1851
/*
 * Swap a table's staged shadow into place at transaction commit.
 * Three cases:
 *   1. shadow has NO_ADDRESSES (flags-only define): just reset stats
 *      if the table was not yet active.
 *   2. live table is ACTIVE: three-way merge — addresses only in the
 *      shadow are inserted, only in the live table deleted, present in
 *      both with flipped negation get their stats cleared.
 *   3. live table is empty/inactive: cheap wholesale swap of the radix
 *      trees and counts.
 * Finally the shadow is destroyed and the table's flags recomputed
 * (ACTIVE set, INACTIVE cleared).
 */
static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable *shadow = kt->pfrkt_shadow;
	int nflags;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			pfr_clstats_ktable(kt, tzero, 1);
		}
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* clear marks; surviving entries get re-marked below */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq); /* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* present in both: keep live entry q */
				if (q->pfrke_not != p->pfrke_not) {
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				}
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* only in shadow: move into live table */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* unmarked live entries were absent from the shadow */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1914
6d2010ae
A
1915void
1916pfr_table_copyin_cleanup(struct pfr_table *tbl)
1917{
0a7de745
A
1918 tbl->pfrt_anchor[sizeof(tbl->pfrt_anchor) - 1] = '\0';
1919 tbl->pfrt_name[sizeof(tbl->pfrt_name) - 1] = '\0';
6d2010ae
A
1920}
1921
d1ecb069 1922static int
b0d623f7
A
1923pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1924{
f427ee49 1925 size_t i;
b0d623f7 1926
0a7de745
A
1927 if (!tbl->pfrt_name[0]) {
1928 return -1;
1929 }
1930 if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) {
1931 return -1;
1932 }
1933 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) {
1934 return -1;
1935 }
1936 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) {
1937 if (tbl->pfrt_name[i]) {
1938 return -1;
1939 }
1940 }
1941 if (pfr_fix_anchor(tbl->pfrt_anchor)) {
1942 return -1;
1943 }
1944 if (tbl->pfrt_flags & ~allowedflags) {
1945 return -1;
1946 }
1947 return 0;
b0d623f7
A
1948}
1949
1950/*
1951 * Rewrite anchors referenced by tables to remove slashes
1952 * and check for validity.
1953 */
d1ecb069 1954static int
b0d623f7
A
1955pfr_fix_anchor(char *anchor)
1956{
1957 size_t siz = MAXPATHLEN;
f427ee49 1958 size_t i;
b0d623f7
A
1959
1960 if (anchor[0] == '/') {
1961 char *path;
1962 int off;
1963
1964 path = anchor;
1965 off = 1;
0a7de745 1966 while (*++path == '/') {
b0d623f7 1967 off++;
0a7de745 1968 }
b0d623f7
A
1969 bcopy(path, anchor, siz - off);
1970 memset(anchor + siz - off, 0, off);
1971 }
0a7de745
A
1972 if (anchor[siz - 1]) {
1973 return -1;
1974 }
f427ee49 1975 for (i = strlen(anchor); i < siz; i++) {
0a7de745
A
1976 if (anchor[i]) {
1977 return -1;
1978 }
1979 }
1980 return 0;
b0d623f7
A
1981}
1982
d1ecb069 1983static int
b0d623f7
A
1984pfr_table_count(struct pfr_table *filter, int flags)
1985{
1986 struct pf_ruleset *rs;
1987
0a7de745
A
1988 if (flags & PFR_FLAG_ALLRSETS) {
1989 return pfr_ktable_cnt;
1990 }
b0d623f7
A
1991 if (filter->pfrt_anchor[0]) {
1992 rs = pf_find_ruleset(filter->pfrt_anchor);
0a7de745 1993 return (rs != NULL) ? rs->tables : -1;
b0d623f7 1994 }
0a7de745 1995 return pf_main_ruleset.tables;
b0d623f7
A
1996}
1997
d1ecb069 1998static int
b0d623f7
A
1999pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
2000{
0a7de745
A
2001 if (flags & PFR_FLAG_ALLRSETS) {
2002 return 0;
2003 }
2004 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) {
2005 return 1;
2006 }
2007 return 0;
b0d623f7
A
2008}
2009
d1ecb069 2010static void
b0d623f7
A
2011pfr_insert_ktables(struct pfr_ktableworkq *workq)
2012{
0a7de745 2013 struct pfr_ktable *p;
b0d623f7 2014
5ba3f43e 2015 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2016
2017 SLIST_FOREACH(p, workq, pfrkt_workq)
0a7de745 2018 pfr_insert_ktable(p);
b0d623f7
A
2019}
2020
d1ecb069 2021static void
b0d623f7
A
2022pfr_insert_ktable(struct pfr_ktable *kt)
2023{
5ba3f43e 2024 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2025
2026 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
2027 pfr_ktable_cnt++;
0a7de745
A
2028 if (kt->pfrkt_root != NULL) {
2029 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) {
b0d623f7 2030 pfr_setflags_ktable(kt->pfrkt_root,
0a7de745
A
2031 kt->pfrkt_root->pfrkt_flags | PFR_TFLAG_REFDANCHOR);
2032 }
2033 }
b0d623f7
A
2034}
2035
d1ecb069 2036static void
b0d623f7
A
2037pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2038{
0a7de745 2039 struct pfr_ktable *p, *q;
b0d623f7 2040
5ba3f43e 2041 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2042
2043 for (p = SLIST_FIRST(workq); p; p = q) {
2044 q = SLIST_NEXT(p, pfrkt_workq);
2045 pfr_setflags_ktable(p, p->pfrkt_nflags);
2046 }
2047}
2048
/*
 * Apply flag word "newf" to table "kt".  Losing the last of
 * REFERENCED / REFDANCHOR / PERSIST clears ACTIVE; a table with no
 * SETMASK bits left is unlinked and destroyed outright.  May free
 * "kt" — callers must not touch it afterwards.  pf lock held.
 */
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* nothing references the table any more: it goes inactive */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST)) {
		newf &= ~PFR_TFLAG_ACTIVE;
	}
	if (!(newf & PFR_TFLAG_ACTIVE)) {
		newf &= ~PFR_TFLAG_USRMASK;
	}
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* full teardown: unlink, drop anchor ref, destroy */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL) {
			/* last anchor ref clears the root's REFDANCHOR */
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) {
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
			}
		}
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	/* going inactive while still holding addresses: flush them */
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	/* leaving a transaction: discard the shadow (staging) table */
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
2087
d1ecb069 2088static void
b0d623f7
A
2089pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
2090{
0a7de745 2091 struct pfr_ktable *p;
b0d623f7 2092
5ba3f43e 2093 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2094
2095 SLIST_FOREACH(p, workq, pfrkt_workq)
0a7de745 2096 pfr_clstats_ktable(p, tzero, recurse);
b0d623f7
A
2097}
2098
/*
 * Zero the packet/byte/match counters of a single table and restamp
 * its creation time with "tzero".  With "recurse" set, also clear the
 * statistics of every address entry in the table.  pf lock held.
 */
static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
2115
/*
 * Allocate and initialize a kernel table for "tbl".  When
 * "attachruleset" is set the table is bound to its anchor's ruleset
 * (created on demand) and the ruleset's table count is bumped.
 * Returns NULL on pool, ruleset, or radix-head allocation failure.
 * pf lock must be held (PR_WAITOK may block).
 */
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_ruleset *rs;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL) {
		return NULL;
	}
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return NULL;
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix tree per address family, keyed on the full address */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return NULL;
	}
	kt->pfrkt_tzero = tzero;

	return kt;
}
2152
d1ecb069 2153static void
b0d623f7
A
2154pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2155{
0a7de745 2156 struct pfr_ktable *p, *q;
b0d623f7 2157
5ba3f43e 2158 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2159
2160 for (p = SLIST_FIRST(workq); p; p = q) {
2161 q = SLIST_NEXT(p, pfrkt_workq);
2162 pfr_destroy_ktable(p, flushaddr);
2163 }
2164}
2165
/*
 * Free table "kt".  With "flushaddr" set, unlink and destroy all of
 * its address entries first.  Also releases both radix heads, any
 * shadow (inactive transaction) table, and the ruleset reference
 * taken at creation time.  pf lock must be held.
 */
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL) {
		_FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	}
	if (kt->pfrkt_ip6 != NULL) {
		_FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	}
	if (kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	}
	if (kt->pfrkt_rs != NULL) {
		/* drop the table's hold on its ruleset */
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
2193
d1ecb069 2194static int
b0d623f7
A
2195pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2196{
2197 int d;
2198
0a7de745
A
2199 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) {
2200 return d;
2201 }
2202 return strcmp(p->pfrkt_anchor, q->pfrkt_anchor);
b0d623f7
A
2203}
2204
d1ecb069 2205static struct pfr_ktable *
b0d623f7
A
2206pfr_lookup_table(struct pfr_table *tbl)
2207{
5ba3f43e 2208 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
2209
2210 /* struct pfr_ktable start like a struct pfr_table */
0a7de745
A
2211 return RB_FIND(pfr_ktablehead, &pfr_ktables,
2212 (struct pfr_ktable *)(void *)tbl);
b0d623f7
A
2213}
2214
/*
 * Test whether address "a" (family "af") matches table "kt": present
 * and not negated.  An inactive table defers to its root table.
 * Updates the table's match/nomatch counters.  Uses the file-global
 * pfr_sin/pfr_sin6 scratch sockaddrs — serialized by the pf lock,
 * which must be held.
 */
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return 0;
	}

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		/* the radix root node is not a real table entry */
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	}
	/* a negated ("!") entry explicitly excludes the address */
	match = (ke && !ke->pfrke_not);
	if (match) {
		kt->pfrkt_match++;
	} else {
		kt->pfrkt_nomatch++;
	}
	return match;
}
2256
/*
 * Account "len" bytes / one packet against table "kt" for a match on
 * address "a".  "dir_out" and "op_pass" index the per-direction,
 * per-operation counter arrays.  If the lookup outcome disagrees with
 * "notrule" (the rule and table no longer agree), the traffic is
 * booked under PFR_OP_XPASS instead and a console warning is printed
 * when op_pass was not PASS.  Uses the global scratch sockaddrs;
 * pf lock must be held.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return;
	}

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		/* ignore the radix root node */
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS) {
			printf("pfr_update_stats: assertion failed.\n");
		}
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	/* XPASS traffic is charged to the table only, not the entry */
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2305
/*
 * Find or create the table "name" in ruleset "rs" and take a rule
 * reference on it.  For an anchored ruleset, a same-named root table
 * in the main ruleset is looked up (or created) and linked as
 * pfrkt_root before the anchored table is inserted.  The first rule
 * reference marks the table REFERENCED.  Returns NULL if a required
 * table could not be created.  pf lock must be held.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_anchor *ac = rs->anchor;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL) {
			return NULL;
		}
		if (ac != NULL) {
			/* clear the anchor to address the main ruleset */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return NULL;
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* 0 -> 1 rule reference: table becomes REFERENCED */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) {
		pfr_setflags_ktable(kt, kt->pfrkt_flags | PFR_TFLAG_REFERENCED);
	}
	return kt;
}
2346
2347void
2348pfr_detach_table(struct pfr_ktable *kt)
2349{
5ba3f43e 2350 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 2351
0a7de745 2352 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) {
b0d623f7
A
2353 printf("pfr_detach_table: refcount = %d.\n",
2354 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
0a7de745
A
2355 } else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) {
2356 pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED);
2357 }
b0d623f7
A
2358}
2359
2360int
2361pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2362 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2363{
0a7de745
A
2364 struct pfr_kentry *ke, *ke2;
2365 struct pf_addr *addr;
2366 union sockaddr_union mask;
2367 int idx = -1, use_counter = 0;
b0d623f7 2368
5ba3f43e 2369 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 2370
0a7de745 2371 if (af == AF_INET) {
b0d623f7 2372 addr = (struct pf_addr *)&pfr_sin.sin_addr;
0a7de745 2373 } else if (af == AF_INET6) {
b0d623f7 2374 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
0a7de745
A
2375 } else {
2376 return -1;
2377 }
b0d623f7 2378
0a7de745 2379 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
b0d623f7 2380 kt = kt->pfrkt_root;
0a7de745
A
2381 }
2382 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
2383 return -1;
2384 }
b0d623f7 2385
0a7de745 2386 if (pidx != NULL) {
b0d623f7 2387 idx = *pidx;
0a7de745
A
2388 }
2389 if (counter != NULL && idx >= 0) {
b0d623f7 2390 use_counter = 1;
0a7de745
A
2391 }
2392 if (idx < 0) {
b0d623f7 2393 idx = 0;
0a7de745 2394 }
b0d623f7
A
2395
2396_next_block:
2397 ke = pfr_kentry_byidx(kt, idx, af);
2398 if (ke == NULL) {
2399 kt->pfrkt_nomatch++;
0a7de745 2400 return 1;
b0d623f7
A
2401 }
2402 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2403 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2404 *rmask = SUNION2PF(&pfr_mask, af);
2405
2406 if (use_counter) {
2407 /* is supplied address within block? */
2408 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2409 /* no, go to next block in table */
2410 idx++;
2411 use_counter = 0;
2412 goto _next_block;
2413 }
2414 PF_ACPY(addr, counter, af);
2415 } else {
2416 /* use first address of block */
2417 PF_ACPY(addr, *raddr, af);
2418 }
2419
2420 if (!KENTRY_NETWORK(ke)) {
2421 /* this is a single IP address - no possible nested block */
2422 PF_ACPY(counter, addr, af);
2423 *pidx = idx;
2424 kt->pfrkt_match++;
0a7de745 2425 return 0;
b0d623f7
A
2426 }
2427 for (;;) {
2428 /* we don't want to use a nested block */
0a7de745 2429 if (af == AF_INET) {
b0d623f7
A
2430 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2431 kt->pfrkt_ip4);
0a7de745 2432 } else if (af == AF_INET6) {
b0d623f7
A
2433 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2434 kt->pfrkt_ip6);
0a7de745
A
2435 } else {
2436 return -1; /* never happens */
2437 }
b0d623f7
A
2438 /* no need to check KENTRY_RNF_ROOT() here */
2439 if (ke2 == ke) {
2440 /* lookup return the same block - perfect */
2441 PF_ACPY(counter, addr, af);
2442 *pidx = idx;
2443 kt->pfrkt_match++;
0a7de745 2444 return 0;
b0d623f7
A
2445 }
2446
2447 /* we need to increase the counter past the nested block */
2448 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2449 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2450 PF_AINC(addr, af);
2451 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2452 /* ok, we reached the end of our main block */
2453 /* go to next block in table */
2454 idx++;
2455 use_counter = 0;
2456 goto _next_block;
2457 }
2458 }
2459}
2460
d1ecb069 2461static struct pfr_kentry *
b0d623f7
A
2462pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2463{
0a7de745 2464 struct pfr_walktree w;
b0d623f7 2465
5ba3f43e 2466 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7 2467
0a7de745 2468 bzero(&w, sizeof(w));
b0d623f7
A
2469 w.pfrw_op = PFRW_POOL_GET;
2470 w.pfrw_cnt = idx;
2471
2472 switch (af) {
2473#if INET
2474 case AF_INET:
2475 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2476 pfr_walktree, &w);
0a7de745 2477 return w.pfrw_kentry;
b0d623f7 2478#endif /* INET */
b0d623f7
A
2479 case AF_INET6:
2480 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2481 pfr_walktree, &w);
0a7de745 2482 return w.pfrw_kentry;
b0d623f7 2483 default:
0a7de745 2484 return NULL;
b0d623f7
A
2485 }
2486}
2487
/*
 * Refresh the dynamic-address state "dyn" from table "kt": reset the
 * per-family address counts, then walk the v4 and/or v6 radix trees
 * (both when dyn->pfid_af is unset) with PFRW_DYNADDR_UPDATE so the
 * walker repopulates them.  pf lock must be held.
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET) {
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
	}
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) {
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
}