/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>

#define	ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define	COPYIN(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyin((from), (to), (size)) :		\
	(bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define	COPYOUT(from, to, size, flags)		\
	((flags & PFR_FLAG_USERIOCTL) ?		\
	copyout((from), (to), (size)) :		\
	(bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
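
/*
 * Added commentary (not in the upstream source): COPYIN/COPYOUT dispatch
 * on PFR_FLAG_USERIOCTL.  Requests arriving through a user ioctl carry
 * user-space addresses and must go through copyin()/copyout(), while
 * kernel-internal callers pass kernel buffers that are simply bcopy()'d;
 * the trailing ", 0" makes the bcopy branch evaluate to a zero error code
 * so both branches can be tested the same way.
 */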

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof (sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)			\
	do {						\
		(sin6).sin6_len = sizeof (sin6);	\
		(sin6).sin6_family = AF_INET6;		\
		(sin6).sin6_addr = (addr);		\
	} while (0)

#define	SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define	SUNION2PF(su, af) (((af) == AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define	KENTRY_RNF_ROOT(ke)	\
	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define	NO_ADDRESSES		(-1)
#define	ENQUEUE_UNMARKED_ONLY	(1)
#define	INVERT_NEG_FLAG		(1)

struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		user_addr_t pfrw1_addr;
		user_addr_t pfrw1_astats;
		struct pfr_kentryworkq *pfrw1_workq;
		struct pfr_kentry *pfrw1_kentry;
		struct pfi_dynaddr *pfrw1_dyn;
	} pfrw_1;
	int pfrw_free;
	int pfrw_flags;
};
#define	pfrw_addr	pfrw_1.pfrw1_addr
#define	pfrw_astats	pfrw_1.pfrw1_astats
#define	pfrw_workq	pfrw_1.pfrw1_workq
#define	pfrw_kentry	pfrw_1.pfrw1_kentry
#define	pfrw_dyn	pfrw_1.pfrw1_dyn
#define	pfrw_cnt	pfrw_free

#define	senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool pfr_ktable_pl;
struct pool pfr_kentry_pl;

static struct pool pfr_kentry_pl2;
static struct sockaddr_in pfr_sin;
static struct sockaddr_in6 pfr_sin6;
static union sockaddr_union pfr_mask;
static struct pf_addr pfr_ffaddr;

static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead pfr_ktables;
static struct pfr_table pfr_nulltable;
static int pfr_ktable_cnt;

void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof (pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof (pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0;
	user_addr_t addr = _addr;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * the following code tries to decide which one is best.
	 */
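	/*
	 * Rough illustration (added commentary, not in the upstream source):
	 * with N = kt->pfrkt_cnt = 1024, the loop below halves i eleven
	 * times, leaving log = 12, so the O(N) full-table scan is chosen
	 * once more than 1024/12 ~= 85 addresses are being deleted; smaller
	 * batches instead do one radix lookup per address to delete.
	 */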
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++, addr += sizeof (ad)) {
			if (COPYIN(addr, &ad, sizeof (ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}

int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq addq, delq, changeq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		addr = _addr + size;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
			addr += sizeof (ad);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	int rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	struct pfr_kentryworkq workq;
	int rv;
	u_int64_t tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}

static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree w;

	SLIST_INIT(workq);
	bzero(&w, sizeof (w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree w;

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}


static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union sa, mask;
	struct radix_node_head *head;
	struct pfr_kentry *ke;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof (sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else
		return (NULL);
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry *ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof (*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}
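
/*
 * Added commentary (not in the upstream source): entries created with
 * intr != 0 are allocated from the separate pfr_kentry_pl2 pool and
 * tagged via pfrke_intrpool so that pfr_destroy_kentry() below can return
 * them to the matching pool.  pfr_add_addrs() and pfr_set_addrs() pass
 * !(flags & PFR_FLAG_USERIOCTL), i.e. entries created on behalf of
 * non-ioctl (kernel-internal) callers come from the second pool.
 */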

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;
	int n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry *p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
	struct pfr_addr ad;
	int i;

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			break;
	}
}

static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int i;

	bzero(sa, sizeof (*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof (sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof (sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}
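
/*
 * Worked example (added commentary, not in the upstream source): for
 * AF_INET and net = 20, pfr_prepare_network() stores
 * htonl(-1 << 12) = 0xfffff000, i.e. the netmask 255.255.240.0.  For
 * AF_INET6 the same pattern is applied 32 bits at a time, filling whole
 * s6_addr32 words with 0xFFFFFFFF until fewer than 32 prefix bits remain.
 */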
991 | ||
d1ecb069 | 992 | static int |
b0d623f7 A |
993 | pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) |
994 | { | |
995 | union sockaddr_union mask; | |
996 | struct radix_node *rn; | |
997 | struct radix_node_head *head; | |
998 | ||
5ba3f43e | 999 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1000 | |
1001 | bzero(ke->pfrke_node, sizeof (ke->pfrke_node)); | |
1002 | if (ke->pfrke_af == AF_INET) | |
1003 | head = kt->pfrkt_ip4; | |
1004 | else if (ke->pfrke_af == AF_INET6) | |
1005 | head = kt->pfrkt_ip6; | |
1006 | else | |
1007 | return (-1); | |
1008 | ||
1009 | if (KENTRY_NETWORK(ke)) { | |
1010 | pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); | |
1011 | rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); | |
1012 | } else | |
1013 | rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); | |
1014 | ||
1015 | return (rn == NULL ? -1 : 0); | |
1016 | } | |
1017 | ||
d1ecb069 | 1018 | static int |
b0d623f7 A |
1019 | pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) |
1020 | { | |
1021 | union sockaddr_union mask; | |
1022 | struct radix_node *rn; | |
1023 | struct radix_node_head *head; | |
1024 | ||
5ba3f43e | 1025 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1026 | |
1027 | if (ke->pfrke_af == AF_INET) | |
1028 | head = kt->pfrkt_ip4; | |
1029 | else if (ke->pfrke_af == AF_INET6) | |
1030 | head = kt->pfrkt_ip6; | |
1031 | else | |
1032 | return (-1); | |
1033 | ||
1034 | if (KENTRY_NETWORK(ke)) { | |
1035 | pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); | |
1036 | rn = rn_delete(&ke->pfrke_sa, &mask, head); | |
1037 | } else | |
1038 | rn = rn_delete(&ke->pfrke_sa, NULL, head); | |
1039 | ||
1040 | if (rn == NULL) { | |
1041 | printf("pfr_unroute_kentry: delete failed.\n"); | |
1042 | return (-1); | |
1043 | } | |
1044 | return (0); | |
1045 | } | |
1046 | ||
d1ecb069 | 1047 | static void |
b0d623f7 A |
1048 | pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) |
1049 | { | |
1050 | bzero(ad, sizeof (*ad)); | |
1051 | if (ke == NULL) | |
1052 | return; | |
1053 | ad->pfra_af = ke->pfrke_af; | |
1054 | ad->pfra_net = ke->pfrke_net; | |
1055 | ad->pfra_not = ke->pfrke_not; | |
1056 | if (ad->pfra_af == AF_INET) | |
1057 | ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; | |
1058 | else if (ad->pfra_af == AF_INET6) | |
1059 | ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; | |
1060 | } | |
1061 | ||
d1ecb069 | 1062 | static int |
b0d623f7 A |
1063 | pfr_walktree(struct radix_node *rn, void *arg) |
1064 | { | |
1065 | struct pfr_kentry *ke = (struct pfr_kentry *)rn; | |
1066 | struct pfr_walktree *w = arg; | |
1067 | int flags = w->pfrw_flags; | |
1068 | ||
5ba3f43e | 1069 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1070 | |
1071 | switch (w->pfrw_op) { | |
1072 | case PFRW_MARK: | |
1073 | ke->pfrke_mark = 0; | |
1074 | break; | |
1075 | case PFRW_SWEEP: | |
1076 | if (ke->pfrke_mark) | |
1077 | break; | |
1078 | /* FALLTHROUGH */ | |
1079 | case PFRW_ENQUEUE: | |
1080 | SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); | |
1081 | w->pfrw_cnt++; | |
1082 | break; | |
1083 | case PFRW_GET_ADDRS: | |
1084 | if (w->pfrw_free-- > 0) { | |
1085 | struct pfr_addr ad; | |
1086 | ||
1087 | pfr_copyout_addr(&ad, ke); | |
d1ecb069 | 1088 | if (copyout(&ad, w->pfrw_addr, sizeof (ad))) |
b0d623f7 | 1089 | return (EFAULT); |
d1ecb069 | 1090 | w->pfrw_addr += sizeof (ad); |
b0d623f7 A |
1091 | } |
1092 | break; | |
1093 | case PFRW_GET_ASTATS: | |
1094 | if (w->pfrw_free-- > 0) { | |
1095 | struct pfr_astats as; | |
1096 | ||
1097 | pfr_copyout_addr(&as.pfras_a, ke); | |
1098 | ||
3e170ce0 A |
1099 | #if !defined(__LP64__) |
1100 | /* Initialized to avoid potential info leak to | |
1101 | * userspace */ | |
1102 | as._pad = 0; | |
1103 | #endif | |
b0d623f7 A |
1104 | bcopy(ke->pfrke_packets, as.pfras_packets, |
1105 | sizeof (as.pfras_packets)); | |
1106 | bcopy(ke->pfrke_bytes, as.pfras_bytes, | |
1107 | sizeof (as.pfras_bytes)); | |
1108 | as.pfras_tzero = ke->pfrke_tzero; | |
1109 | ||
1110 | if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags)) | |
1111 | return (EFAULT); | |
d1ecb069 | 1112 | w->pfrw_astats += sizeof (as); |
b0d623f7 A |
1113 | } |
1114 | break; | |
1115 | case PFRW_POOL_GET: | |
1116 | if (ke->pfrke_not) | |
1117 | break; /* negative entries are ignored */ | |
1118 | if (!w->pfrw_cnt--) { | |
1119 | w->pfrw_kentry = ke; | |
1120 | return (1); /* finish search */ | |
1121 | } | |
1122 | break; | |
1123 | case PFRW_DYNADDR_UPDATE: | |
1124 | if (ke->pfrke_af == AF_INET) { | |
1125 | if (w->pfrw_dyn->pfid_acnt4++ > 0) | |
1126 | break; | |
1127 | pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); | |
1128 | w->pfrw_dyn->pfid_addr4 = *SUNION2PF( | |
1129 | &ke->pfrke_sa, AF_INET); | |
1130 | w->pfrw_dyn->pfid_mask4 = *SUNION2PF( | |
1131 | &pfr_mask, AF_INET); | |
1132 | } else if (ke->pfrke_af == AF_INET6) { | |
1133 | if (w->pfrw_dyn->pfid_acnt6++ > 0) | |
1134 | break; | |
1135 | pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); | |
1136 | w->pfrw_dyn->pfid_addr6 = *SUNION2PF( | |
1137 | &ke->pfrke_sa, AF_INET6); | |
1138 | w->pfrw_dyn->pfid_mask6 = *SUNION2PF( | |
1139 | &pfr_mask, AF_INET6); | |
1140 | } | |
1141 | break; | |
1142 | } | |
1143 | return (0); | |
1144 | } | |
1145 | ||
1146 | int | |
1147 | pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) | |
1148 | { | |
1149 | struct pfr_ktableworkq workq; | |
1150 | struct pfr_ktable *p; | |
1151 | int xdel = 0; | |
1152 | ||
5ba3f43e | 1153 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1154 | |
1155 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | | |
1156 | PFR_FLAG_ALLRSETS); | |
1157 | if (pfr_fix_anchor(filter->pfrt_anchor)) | |
1158 | return (EINVAL); | |
1159 | if (pfr_table_count(filter, flags) < 0) | |
1160 | return (ENOENT); | |
1161 | ||
1162 | SLIST_INIT(&workq); | |
1163 | RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { | |
1164 | if (pfr_skip_table(filter, p, flags)) | |
1165 | continue; | |
1166 | if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) | |
1167 | continue; | |
1168 | if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) | |
1169 | continue; | |
1170 | p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; | |
1171 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); | |
1172 | xdel++; | |
1173 | } | |
1174 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1175 | pfr_setflags_ktables(&workq); | |
1176 | } | |
1177 | if (ndel != NULL) | |
1178 | *ndel = xdel; | |
1179 | return (0); | |
1180 | } | |
1181 | ||
1182 | int | |
d1ecb069 | 1183 | pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags) |
b0d623f7 A |
1184 | { |
1185 | struct pfr_ktableworkq addq, changeq; | |
1186 | struct pfr_ktable *p, *q, *r, key; | |
1187 | int i, rv, xadd = 0; | |
d1ecb069 | 1188 | u_int64_t tzero = pf_calendar_time_second(); |
b0d623f7 | 1189 | |
5ba3f43e | 1190 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1191 | |
1192 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); | |
1193 | SLIST_INIT(&addq); | |
1194 | SLIST_INIT(&changeq); | |
d1ecb069 A |
1195 | for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { |
1196 | if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) | |
b0d623f7 | 1197 | senderr(EFAULT); |
6d2010ae | 1198 | pfr_table_copyin_cleanup(&key.pfrkt_t); |
b0d623f7 A |
1199 | if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, |
1200 | flags & PFR_FLAG_USERIOCTL)) | |
1201 | senderr(EINVAL); | |
1202 | key.pfrkt_flags |= PFR_TFLAG_ACTIVE; | |
1203 | p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); | |
1204 | if (p == NULL) { | |
1205 | p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); | |
1206 | if (p == NULL) | |
1207 | senderr(ENOMEM); | |
1208 | SLIST_FOREACH(q, &addq, pfrkt_workq) { | |
1209 | if (!pfr_ktable_compare(p, q)) | |
1210 | goto _skip; | |
1211 | } | |
1212 | SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); | |
1213 | xadd++; | |
1214 | if (!key.pfrkt_anchor[0]) | |
1215 | goto _skip; | |
1216 | ||
1217 | /* find or create root table */ | |
1218 | bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor)); | |
1219 | r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); | |
1220 | if (r != NULL) { | |
1221 | p->pfrkt_root = r; | |
1222 | goto _skip; | |
1223 | } | |
1224 | SLIST_FOREACH(q, &addq, pfrkt_workq) { | |
1225 | if (!pfr_ktable_compare(&key, q)) { | |
1226 | p->pfrkt_root = q; | |
1227 | goto _skip; | |
1228 | } | |
1229 | } | |
1230 | key.pfrkt_flags = 0; | |
1231 | r = pfr_create_ktable(&key.pfrkt_t, 0, 1); | |
1232 | if (r == NULL) | |
1233 | senderr(ENOMEM); | |
1234 | SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); | |
1235 | p->pfrkt_root = r; | |
1236 | } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { | |
1237 | SLIST_FOREACH(q, &changeq, pfrkt_workq) | |
1238 | if (!pfr_ktable_compare(&key, q)) | |
1239 | goto _skip; | |
1240 | p->pfrkt_nflags = (p->pfrkt_flags & | |
1241 | ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; | |
1242 | SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); | |
1243 | xadd++; | |
1244 | } | |
1245 | _skip: | |
1246 | ; | |
1247 | } | |
1248 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1249 | pfr_insert_ktables(&addq); | |
1250 | pfr_setflags_ktables(&changeq); | |
1251 | } else | |
1252 | pfr_destroy_ktables(&addq, 0); | |
1253 | if (nadd != NULL) | |
1254 | *nadd = xadd; | |
1255 | return (0); | |
1256 | _bad: | |
1257 | pfr_destroy_ktables(&addq, 0); | |
1258 | return (rv); | |
1259 | } | |
1260 | ||
1261 | int | |
d1ecb069 | 1262 | pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags) |
b0d623f7 A |
1263 | { |
1264 | struct pfr_ktableworkq workq; | |
1265 | struct pfr_ktable *p, *q, key; | |
1266 | int i, xdel = 0; | |
1267 | ||
5ba3f43e | 1268 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1269 | |
1270 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); | |
1271 | SLIST_INIT(&workq); | |
d1ecb069 A |
1272 | for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { |
1273 | if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) | |
b0d623f7 | 1274 | return (EFAULT); |
6d2010ae | 1275 | pfr_table_copyin_cleanup(&key.pfrkt_t); |
b0d623f7 A |
1276 | if (pfr_validate_table(&key.pfrkt_t, 0, |
1277 | flags & PFR_FLAG_USERIOCTL)) | |
1278 | return (EINVAL); | |
1279 | p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); | |
1280 | if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { | |
1281 | SLIST_FOREACH(q, &workq, pfrkt_workq) | |
1282 | if (!pfr_ktable_compare(p, q)) | |
1283 | goto _skip; | |
1284 | p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; | |
1285 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); | |
1286 | xdel++; | |
1287 | } | |
1288 | _skip: | |
1289 | ; | |
1290 | } | |
1291 | ||
1292 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1293 | pfr_setflags_ktables(&workq); | |
1294 | } | |
1295 | if (ndel != NULL) | |
1296 | *ndel = xdel; | |
1297 | return (0); | |
1298 | } | |
1299 | ||
1300 | int | |
d1ecb069 | 1301 | pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size, |
b0d623f7 A |
1302 | int flags) |
1303 | { | |
1304 | struct pfr_ktable *p; | |
1305 | int n, nn; | |
1306 | ||
1307 | ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); | |
1308 | if (pfr_fix_anchor(filter->pfrt_anchor)) | |
1309 | return (EINVAL); | |
1310 | n = nn = pfr_table_count(filter, flags); | |
1311 | if (n < 0) | |
1312 | return (ENOENT); | |
1313 | if (n > *size) { | |
1314 | *size = n; | |
1315 | return (0); | |
1316 | } | |
1317 | RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { | |
1318 | if (pfr_skip_table(filter, p, flags)) | |
1319 | continue; | |
1320 | if (n-- <= 0) | |
1321 | continue; | |
d1ecb069 | 1322 | if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags)) |
b0d623f7 | 1323 | return (EFAULT); |
d1ecb069 | 1324 | tbl += sizeof (p->pfrkt_t); |
b0d623f7 A |
1325 | } |
1326 | if (n) { | |
1327 | printf("pfr_get_tables: corruption detected (%d).\n", n); | |
1328 | return (ENOTTY); | |
1329 | } | |
1330 | *size = nn; | |
1331 | return (0); | |
1332 | } | |
1333 | ||
1334 | int | |
d1ecb069 | 1335 | pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size, |
b0d623f7 A |
1336 | int flags) |
1337 | { | |
1338 | struct pfr_ktable *p; | |
1339 | struct pfr_ktableworkq workq; | |
1340 | int n, nn; | |
d1ecb069 | 1341 | u_int64_t tzero = pf_calendar_time_second(); |
b0d623f7 | 1342 | |
5ba3f43e | 1343 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1344 | |
1345 | /* XXX PFR_FLAG_CLSTATS disabled */ | |
1346 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS); | |
1347 | if (pfr_fix_anchor(filter->pfrt_anchor)) | |
1348 | return (EINVAL); | |
1349 | n = nn = pfr_table_count(filter, flags); | |
1350 | if (n < 0) | |
1351 | return (ENOENT); | |
1352 | if (n > *size) { | |
1353 | *size = n; | |
1354 | return (0); | |
1355 | } | |
1356 | SLIST_INIT(&workq); | |
1357 | RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { | |
1358 | if (pfr_skip_table(filter, p, flags)) | |
1359 | continue; | |
1360 | if (n-- <= 0) | |
1361 | continue; | |
d1ecb069 | 1362 | if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) { |
b0d623f7 A |
1363 | return (EFAULT); |
1364 | } | |
d1ecb069 | 1365 | tbl += sizeof (p->pfrkt_ts); |
b0d623f7 A |
1366 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); |
1367 | } | |
1368 | if (flags & PFR_FLAG_CLSTATS) | |
1369 | pfr_clstats_ktables(&workq, tzero, | |
1370 | flags & PFR_FLAG_ADDRSTOO); | |
1371 | if (n) { | |
1372 | printf("pfr_get_tstats: corruption detected (%d).\n", n); | |
1373 | return (ENOTTY); | |
1374 | } | |
1375 | *size = nn; | |
1376 | return (0); | |
1377 | } | |
1378 | ||
1379 | int | |
d1ecb069 | 1380 | pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags) |
b0d623f7 A |
1381 | { |
1382 | struct pfr_ktableworkq workq; | |
1383 | struct pfr_ktable *p, key; | |
1384 | int i, xzero = 0; | |
d1ecb069 | 1385 | u_int64_t tzero = pf_calendar_time_second(); |
b0d623f7 | 1386 | |
5ba3f43e | 1387 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1388 | |
1389 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | | |
1390 | PFR_FLAG_ADDRSTOO); | |
1391 | SLIST_INIT(&workq); | |
d1ecb069 A |
1392 | for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { |
1393 | if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) | |
b0d623f7 | 1394 | return (EFAULT); |
6d2010ae | 1395 | pfr_table_copyin_cleanup(&key.pfrkt_t); |
b0d623f7 A |
1396 | if (pfr_validate_table(&key.pfrkt_t, 0, 0)) |
1397 | return (EINVAL); | |
1398 | p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); | |
1399 | if (p != NULL) { | |
1400 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); | |
1401 | xzero++; | |
1402 | } | |
1403 | } | |
1404 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1405 | pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); | |
1406 | } | |
1407 | if (nzero != NULL) | |
1408 | *nzero = xzero; | |
1409 | return (0); | |
1410 | } | |
1411 | ||
1412 | int | |
d1ecb069 | 1413 | pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag, |
b0d623f7 A |
1414 | int *nchange, int *ndel, int flags) |
1415 | { | |
1416 | struct pfr_ktableworkq workq; | |
1417 | struct pfr_ktable *p, *q, key; | |
1418 | int i, xchange = 0, xdel = 0; | |
1419 | ||
5ba3f43e | 1420 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1421 | |
1422 | ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); | |
1423 | if ((setflag & ~PFR_TFLAG_USRMASK) || | |
1424 | (clrflag & ~PFR_TFLAG_USRMASK) || | |
1425 | (setflag & clrflag)) | |
1426 | return (EINVAL); | |
1427 | SLIST_INIT(&workq); | |
d1ecb069 A |
1428 | for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { |
1429 | if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) | |
b0d623f7 | 1430 | return (EFAULT); |
6d2010ae | 1431 | pfr_table_copyin_cleanup(&key.pfrkt_t); |
b0d623f7 A |
1432 | if (pfr_validate_table(&key.pfrkt_t, 0, |
1433 | flags & PFR_FLAG_USERIOCTL)) | |
1434 | return (EINVAL); | |
1435 | p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); | |
1436 | if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { | |
1437 | p->pfrkt_nflags = (p->pfrkt_flags | setflag) & | |
1438 | ~clrflag; | |
1439 | if (p->pfrkt_nflags == p->pfrkt_flags) | |
1440 | goto _skip; | |
1441 | SLIST_FOREACH(q, &workq, pfrkt_workq) | |
1442 | if (!pfr_ktable_compare(p, q)) | |
1443 | goto _skip; | |
1444 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); | |
1445 | if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && | |
1446 | (clrflag & PFR_TFLAG_PERSIST) && | |
1447 | !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) | |
1448 | xdel++; | |
1449 | else | |
1450 | xchange++; | |
1451 | } | |
1452 | _skip: | |
1453 | ; | |
1454 | } | |
1455 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1456 | pfr_setflags_ktables(&workq); | |
1457 | } | |
1458 | if (nchange != NULL) | |
1459 | *nchange = xchange; | |
1460 | if (ndel != NULL) | |
1461 | *ndel = xdel; | |
1462 | return (0); | |
1463 | } | |
1464 | ||
1465 | int | |
1466 | pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) | |
1467 | { | |
1468 | struct pfr_ktableworkq workq; | |
1469 | struct pfr_ktable *p; | |
1470 | struct pf_ruleset *rs; | |
1471 | int xdel = 0; | |
1472 | ||
5ba3f43e | 1473 | LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); |
b0d623f7 A |
1474 | |
1475 | ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); | |
1476 | rs = pf_find_or_create_ruleset(trs->pfrt_anchor); | |
1477 | if (rs == NULL) | |
1478 | return (ENOMEM); | |
1479 | SLIST_INIT(&workq); | |
1480 | RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { | |
1481 | if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || | |
1482 | pfr_skip_table(trs, p, 0)) | |
1483 | continue; | |
1484 | p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; | |
1485 | SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); | |
1486 | xdel++; | |
1487 | } | |
1488 | if (!(flags & PFR_FLAG_DUMMY)) { | |
1489 | pfr_setflags_ktables(&workq); | |
1490 | if (ticket != NULL) | |
1491 | *ticket = ++rs->tticket; | |
1492 | rs->topen = 1; | |
1493 | } else | |
1494 | pf_remove_if_empty_ruleset(rs); | |
1495 | if (ndel != NULL) | |
1496 | *ndel = xdel; | |
1497 | return (0); | |
1498 | } | |
1499 | ||
int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *kt, *rt, *shadow, key;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	struct pf_ruleset *rs;
	int i, rv, xadd = 0, xaddr = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof (key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof (key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

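/*
 * Abort an open transaction: clear the INACTIVE flag on every table
 * staged under the anchor (which tears down their shadows via
 * pfr_setflags_ktable()) and close the ruleset's transaction.
 */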
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	struct pf_ruleset *rs;
	int xdel = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

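/*
 * Commit an open transaction: every INACTIVE table under the anchor is
 * merged into (or becomes) its active counterpart via pfr_commit_ktable().
 * *nadd counts tables that were newly activated, *nchange tables that
 * already existed and only had their contents replaced.
 */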
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable *p, *q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *rs;
	int xadd = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

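/*
 * Fold a shadow table into its active counterpart.  If the table was not
 * active yet, the shadow's radix heads are simply swapped in; if it was,
 * the old and new address sets are diffed so that unchanged entries keep
 * their statistics and only additions, deletions and negation flips are
 * applied.  The shadow is then destroyed and the table marked ACTIVE.
 */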
static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable *shadow = kt->pfrkt_shadow;
	int nflags;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				if (q->pfrke_not != p->pfrke_not)
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}

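/*
 * Force NUL-termination on the name and anchor strings of a table header
 * obtained from userland before they are used as C strings.
 */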
void
pfr_table_copyin_cleanup(struct pfr_table *tbl)
{
	tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
	tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
}

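/*
 * Sanity-check a table specification from userland: non-empty,
 * NUL-terminated and NUL-padded name, a well-formed anchor path, no flags
 * outside 'allowedflags', and (optionally) no reserved anchor name.
 */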
static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	int i;

	if (!tbl->pfrt_name[0])
		return (-1);
	if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0)
		return (-1);
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
		return (-1);
	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
		if (tbl->pfrt_name[i])
			return (-1);
	if (pfr_fix_anchor(tbl->pfrt_anchor))
		return (-1);
	if (tbl->pfrt_flags & ~allowedflags)
		return (-1);
	return (0);
}

/*
 * Rewrite anchors referenced by tables to remove slashes
 * and check for validity.
 */
static int
pfr_fix_anchor(char *anchor)
{
	size_t siz = MAXPATHLEN;
	int i;

	if (anchor[0] == '/') {
		char *path;
		int off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		bcopy(path, anchor, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	if (anchor[siz - 1])
		return (-1);
	for (i = strlen(anchor); i < (int)siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}

static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}

static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
	if (flags & PFR_FLAG_ALLRSETS)
		return (0);
	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
		return (1);
	return (0);
}

static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_insert_ktable(p);
}

static void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}

static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
	struct pfr_ktable *p, *q;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_setflags_ktable(p, p->pfrkt_nflags);
	}
}

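/*
 * Apply a new flag word to a table and perform the side effects implied
 * by the transition: a table that loses both REFERENCED and PERSIST also
 * loses ACTIVE; one that loses all SETMASK flags is removed from the tree
 * and destroyed; dropping ACTIVE flushes its addresses and dropping
 * INACTIVE discards any pending shadow.
 */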
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}

static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
	struct pfr_ktable *p;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrkt_workq)
		pfr_clstats_ktable(p, tzero, recurse);
}

static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}

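/*
 * Allocate and initialise a kernel table: zeroed counters, one radix head
 * each for IPv4 and IPv6, and (optionally) a reference on the ruleset
 * named by the table's anchor.
 */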
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable *kt;
	struct pf_ruleset *rs;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof (*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable *p, *q;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl));
}

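/*
 * Packet-path lookup: report whether 'a' matches the table, honouring
 * negated entries, and bump the table's match/nomatch counters.  An
 * anchor-local table that is not active falls back to its root table.
 */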
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *ke = NULL;
	int match;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

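/*
 * Account a packet against the table and the entry it matched.  If the
 * lookup result disagrees with the rule's notion of a (non-)match, the
 * traffic is booked under PFR_OP_XPASS instead, and a warning is printed
 * when that happens for anything other than a pass operation.
 */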
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *ke = NULL;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

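/*
 * Find or create the table a rule refers to and take a rule reference on
 * it.  For tables inside an anchor the corresponding root table is found
 * or created as well and linked via pfrkt_root.
 */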
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_anchor *ac = rs->anchor;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&tbl, sizeof (tbl));
	strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

void
pfr_detach_table(struct pfr_ktable *kt)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

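/*
 * Pick the next address from a table used as a round-robin address pool.
 * '*pidx' selects the block (a table entry), 'counter' the position
 * inside it; nested, more specific blocks are skipped by advancing the
 * counter past them.  Returns 0 on success, 1 when the table is
 * exhausted and -1 on error.
 */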
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry *ke, *ke2;
	struct pf_addr *addr;
	union sockaddr_union mask;
	int idx = -1, use_counter = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		else
			return (-1); /* never happens */
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree w;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#if INET
	case AF_INET:
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#if INET6
	case AF_INET6:
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

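/*
 * Refresh a dynamic address (struct pfi_dynaddr) from the table backing
 * it: reset the per-family address counts and walk the IPv4/IPv6 radix
 * trees so pfr_walktree() can recount and record the entries.
 */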
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
}