Commit | Line | Data |
---|---|---|
b0d623f7 | 1 | /* |
d1ecb069 | 2 | * Copyright (c) 2007-2009 Apple Inc. All rights reserved. |
b0d623f7 | 3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
d1ecb069 | 29 | /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */ |
b0d623f7 | 30 | /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */ |
31 | ||
32 | /* | |
33 | * Copyright (c) 2001 Daniel Hartmeier | |
34 | * Copyright (c) 2002,2003 Henning Brauer | |
35 | * All rights reserved. | |
36 | * | |
37 | * Redistribution and use in source and binary forms, with or without | |
38 | * modification, are permitted provided that the following conditions | |
39 | * are met: | |
40 | * | |
41 | * - Redistributions of source code must retain the above copyright | |
42 | * notice, this list of conditions and the following disclaimer. | |
43 | * - Redistributions in binary form must reproduce the above | |
44 | * copyright notice, this list of conditions and the following | |
45 | * disclaimer in the documentation and/or other materials provided | |
46 | * with the distribution. | |
47 | * | |
48 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
49 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
50 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
51 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
52 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
53 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
54 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
55 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |
56 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
57 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |
58 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
59 | * POSSIBILITY OF SUCH DAMAGE. | |
60 | * | |
61 | * Effort sponsored in part by the Defense Advanced Research Projects | |
62 | * Agency (DARPA) and Air Force Research Laboratory, Air Force | |
63 | * Materiel Command, USAF, under agreement number F30602-01-2-0537. | |
64 | * | |
65 | */ | |
66 | ||
67 | #include <machine/endian.h> | |
68 | #include <sys/param.h> | |
69 | #include <sys/systm.h> | |
70 | #include <sys/mbuf.h> | |
71 | #include <sys/filio.h> | |
72 | #include <sys/fcntl.h> | |
73 | #include <sys/socket.h> | |
74 | #include <sys/socketvar.h> | |
75 | #include <sys/kernel.h> | |
76 | #include <sys/time.h> | |
77 | #include <sys/proc_internal.h> | |
78 | #include <sys/malloc.h> | |
79 | #include <sys/kauth.h> | |
80 | #include <sys/conf.h> | |
81 | #include <sys/mcache.h> | |
82 | ||
83 | #include <mach/vm_param.h> | |
84 | ||
85 | #include <net/if.h> | |
86 | #include <net/if_types.h> | |
87 | #include <net/route.h> | |
88 | ||
89 | #include <netinet/in.h> | |
90 | #include <netinet/in_var.h> | |
91 | #include <netinet/in_systm.h> | |
92 | #include <netinet/ip.h> | |
93 | #include <netinet/ip_var.h> | |
94 | #include <netinet/ip_icmp.h> | |
95 | #include <netinet/if_ether.h> | |
96 | ||
97 | #include <libkern/crypto/md5.h> | |
98 | ||
99 | #include <miscfs/devfs/devfs.h> | |
100 | ||
101 | #include <net/pfvar.h> | |
102 | ||
103 | #if NPFSYNC | |
104 | #include <net/if_pfsync.h> | |
105 | #endif /* NPFSYNC */ | |
106 | ||
107 | #if PFLOG | |
108 | #include <net/if_pflog.h> | |
109 | #endif /* PFLOG */ | |
110 | ||
111 | #if INET6 | |
112 | #include <netinet/ip6.h> | |
113 | #include <netinet/in_pcb.h> | |
114 | #endif /* INET6 */ | |
115 | ||
116 | #if ALTQ | |
117 | #include <altq/altq.h> | |
118 | #endif /* ALTQ */ | |
119 | ||
120 | #if 0 | |
121 | static void pfdetach(void); | |
122 | #endif | |
123 | static int pfopen(dev_t, int, int, struct proc *); | |
124 | static int pfclose(dev_t, int, int, struct proc *); | |
125 | static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); | |
126 | static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, | |
127 | u_int8_t, u_int8_t, u_int8_t); | |
128 | ||
129 | static void pf_mv_pool(struct pf_palist *, struct pf_palist *); | |
130 | static void pf_empty_pool(struct pf_palist *); | |
131 | #if ALTQ | |
132 | static int pf_begin_altq(u_int32_t *); | |
133 | static int pf_rollback_altq(u_int32_t); | |
134 | static int pf_commit_altq(u_int32_t); | |
135 | static int pf_enable_altq(struct pf_altq *); | |
136 | static int pf_disable_altq(struct pf_altq *); | |
137 | #endif /* ALTQ */ | |
138 | static int pf_begin_rules(u_int32_t *, int, const char *); | |
139 | static int pf_rollback_rules(u_int32_t, int, char *); | |
140 | static int pf_setup_pfsync_matching(struct pf_ruleset *); | |
141 | static void pf_hash_rule(MD5_CTX *, struct pf_rule *); | |
142 | #ifndef NO_APPLE_EXTENSIONS | |
143 | static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t); | |
144 | #else | |
145 | static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); | |
146 | #endif | |
147 | static int pf_commit_rules(u_int32_t, int, char *); | |
148 | static void pf_state_export(struct pfsync_state *, struct pf_state_key *, | |
149 | struct pf_state *); | |
150 | static void pf_state_import(struct pfsync_state *, struct pf_state_key *, | |
151 | struct pf_state *); | |
152 | ||
153 | #define PF_CDEV_MAJOR (-1) | |
154 | ||
155 | static struct cdevsw pf_cdevsw = { | |
156 | /* open */ pfopen, | |
157 | /* close */ pfclose, | |
158 | /* read */ eno_rdwrt, | |
159 | /* write */ eno_rdwrt, | |
160 | /* ioctl */ pfioctl, | |
161 | /* stop */ eno_stop, | |
162 | /* reset */ eno_reset, | |
163 | /* tty */ NULL, | |
164 | /* select */ eno_select, | |
165 | /* mmap */ eno_mmap, | |
166 | /* strategy */ eno_strat, | |
167 | /* getc */ eno_getc, | |
168 | /* putc */ eno_putc, | |
169 | /* type */ 0 | |
170 | }; | |
171 | ||
172 | static void pf_attach_hooks(void); | |
d1ecb069 | 173 | #if 0 |
174 | /* currently unused along with pfdetach() */ | |
b0d623f7 | 175 | static void pf_detach_hooks(void); |
d1ecb069 | 176 | #endif |
177 | ||
178 | /* | |
179 | * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer, | |
180 | * and used in pf_af_hook() for performance optimization, such that packets | |
181 | * will enter pf_test() or pf_test6() only when PF is running. | |
182 | */ | |
183 | static int pf_is_enabled; | |
b0d623f7 | 184 | ||
185 | struct pf_rule pf_default_rule; | |
186 | #if ALTQ | |
187 | static int pf_altq_running; | |
188 | #endif /* ALTQ */ | |
189 | ||
190 | #define TAGID_MAX 50000 | |
191 | static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = | |
192 | TAILQ_HEAD_INITIALIZER(pf_tags); | |
193 | #if ALTQ | |
194 | static TAILQ_HEAD(pf_tags, pf_tagname) pf_qids = | |
195 | TAILQ_HEAD_INITIALIZER(pf_qids); | |
196 | #endif /* ALTQ */ | |
197 | ||
198 | #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) | |
199 | #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE | |
200 | #endif | |
201 | static u_int16_t tagname2tag(struct pf_tags *, char *); | |
202 | static void tag2tagname(struct pf_tags *, u_int16_t, char *); | |
203 | static void tag_unref(struct pf_tags *, u_int16_t); | |
204 | static int pf_rtlabel_add(struct pf_addr_wrap *); | |
205 | static void pf_rtlabel_remove(struct pf_addr_wrap *); | |
206 | static void pf_rtlabel_copyout(struct pf_addr_wrap *); | |
207 | ||
208 | #if INET | |
209 | static int pf_inet_hook(struct ifnet *, struct mbuf **, int); | |
210 | #endif /* INET */ | |
211 | #if INET6 | |
212 | static int pf_inet6_hook(struct ifnet *, struct mbuf **, int); | |
213 | #endif /* INET6 */ | |
214 | ||
215 | #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x | |
216 | ||
d1ecb069 | 217 | #define PF_USER_ADDR(a, s, f) \ |
218 | (proc_is64bit(current_proc()) ? \ | |
219 | ((struct s##_64 *)a)->f : ((struct s##_32 *)a)->f) | |
220 | ||
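
The `PF_USER_ADDR()` macro above lets one ioctl handler read a field from either the 32-bit or the 64-bit user-space layout of the same structure, keyed off `proc_is64bit()`. Below is a minimal user-space sketch of that pattern; the `pfioc_example_32`/`pfioc_example_64` layouts and the `proc_is64bit_stub()` helper are hypothetical stand-ins, not part of pf.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit and 64-bit user layouts of the same ioctl argument. */
struct pfioc_example_32 { uint32_t buf; uint32_t len; };
struct pfioc_example_64 { uint64_t buf; uint32_t len; };

/* Stand-in for the kernel's proc_is64bit(current_proc()). */
static int proc_is64bit_stub(void) { return 1; }

/* Same shape as PF_USER_ADDR(): token-paste the suffix matching the caller's ABI. */
#define EXAMPLE_USER_ADDR(a, s, f) \
	(proc_is64bit_stub() ? ((struct s##_64 *)(a))->f : ((struct s##_32 *)(a))->f)

int main(void)
{
	struct pfioc_example_64 arg = { .buf = 0x1000, .len = 16 };

	/* The call site is identical for both ABIs; only the cast differs. */
	printf("user buffer: 0x%llx\n",
	    (unsigned long long)EXAMPLE_USER_ADDR(&arg, pfioc_example, buf));
	return 0;
}
```
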
b0d623f7 | 221 | static lck_attr_t *pf_perim_lock_attr; |
222 | static lck_grp_t *pf_perim_lock_grp; | |
223 | static lck_grp_attr_t *pf_perim_lock_grp_attr; | |
224 | ||
225 | static lck_attr_t *pf_lock_attr; | |
226 | static lck_grp_t *pf_lock_grp; | |
227 | static lck_grp_attr_t *pf_lock_grp_attr; | |
228 | ||
229 | struct thread *pf_purge_thread; | |
230 | ||
231 | extern void pfi_kifaddr_update(void *); | |
232 | ||
233 | void | |
234 | pfinit(void) | |
235 | { | |
236 | u_int32_t *t = pf_default_rule.timeout; | |
237 | int maj; | |
238 | ||
239 | pf_perim_lock_grp_attr = lck_grp_attr_alloc_init(); | |
240 | pf_perim_lock_grp = lck_grp_alloc_init("pf_perim", | |
241 | pf_perim_lock_grp_attr); | |
242 | pf_perim_lock_attr = lck_attr_alloc_init(); | |
243 | pf_perim_lock = lck_rw_alloc_init(pf_perim_lock_grp, | |
244 | pf_perim_lock_attr); | |
245 | ||
246 | pf_lock_grp_attr = lck_grp_attr_alloc_init(); | |
247 | pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr); | |
248 | pf_lock_attr = lck_attr_alloc_init(); | |
249 | pf_lock = lck_mtx_alloc_init(pf_lock_grp, pf_lock_attr); | |
250 | ||
251 | pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl", | |
252 | NULL); | |
253 | pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0, | |
254 | "pfsrctrpl", NULL); | |
255 | pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl", | |
256 | NULL); | |
257 | pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0, | |
258 | "pfstatekeypl", NULL); | |
259 | #ifndef NO_APPLE_EXTENSIONS | |
260 | pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0, | |
261 | "pfappstatepl", NULL); | |
262 | #endif | |
263 | #if ALTQ | |
264 | pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl", | |
265 | NULL); | |
266 | #endif /* ALTQ */ | |
267 | pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0, | |
268 | "pfpooladdrpl", NULL); | |
269 | pfr_initialize(); | |
270 | pfi_initialize(); | |
271 | pf_osfp_initialize(); | |
272 | ||
273 | pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, | |
274 | pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); | |
275 | ||
276 | if (max_mem <= 256*1024*1024) | |
277 | pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = | |
278 | PFR_KENTRY_HIWAT_SMALL; | |
279 | ||
280 | RB_INIT(&tree_src_tracking); | |
281 | RB_INIT(&pf_anchors); | |
282 | pf_init_ruleset(&pf_main_ruleset); | |
283 | TAILQ_INIT(&pf_pabuf); | |
284 | TAILQ_INIT(&state_list); | |
285 | #if ALTQ | |
286 | TAILQ_INIT(&pf_altqs[0]); | |
287 | TAILQ_INIT(&pf_altqs[1]); | |
288 | pf_altqs_active = &pf_altqs[0]; | |
289 | pf_altqs_inactive = &pf_altqs[1]; | |
290 | #endif /* ALTQ */ | |
291 | ||
292 | /* default rule should never be garbage collected */ | |
293 | pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next; | |
294 | pf_default_rule.action = PF_PASS; | |
295 | pf_default_rule.nr = -1; | |
296 | pf_default_rule.rtableid = IFSCOPE_NONE; | |
297 | ||
298 | /* initialize default timeouts */ | |
299 | t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL; | |
300 | t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL; | |
301 | t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL; | |
302 | t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL; | |
303 | t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL; | |
304 | t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL; | |
305 | t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL; | |
306 | t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL; | |
307 | t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL; | |
308 | t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL; | |
309 | t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL; | |
310 | #ifndef NO_APPLE_EXTENSIONS | |
311 | t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL; | |
312 | t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL; | |
313 | t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL; | |
314 | t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL; | |
315 | t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL; | |
316 | t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL; | |
317 | #endif | |
318 | t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL; | |
319 | t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL; | |
320 | t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL; | |
321 | t[PFTM_FRAG] = PFTM_FRAG_VAL; | |
322 | t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL; | |
323 | t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL; | |
324 | t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL; | |
325 | t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START; | |
326 | t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; | |
327 | ||
328 | pf_normalize_init(); | |
329 | bzero(&pf_status, sizeof (pf_status)); | |
330 | pf_status.debug = PF_DEBUG_URGENT; | |
331 | ||
332 | /* XXX do our best to avoid a conflict */ | |
333 | pf_status.hostid = random(); | |
334 | ||
335 | if (kernel_thread_start(pf_purge_thread_fn, NULL, | |
336 | &pf_purge_thread) != 0) { | |
337 | printf("%s: unable to start purge thread!", __func__); | |
338 | return; | |
339 | } | |
340 | ||
341 | maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw); | |
342 | if (maj == -1) { | |
343 | printf("%s: failed to allocate major number!\n", __func__); | |
344 | return; | |
345 | } | |
346 | (void) devfs_make_node(makedev(maj, 0), DEVFS_CHAR, | |
347 | UID_ROOT, GID_WHEEL, 0600, "pf", 0); | |
d1ecb069 | 348 | ||
349 | pf_attach_hooks(); | |
b0d623f7 | 350 | } |
351 | ||
352 | #if 0 | |
353 | static void | |
354 | pfdetach(void) | |
355 | { | |
356 | struct pf_anchor *anchor; | |
357 | struct pf_state *state; | |
358 | struct pf_src_node *node; | |
359 | struct pfioc_table pt; | |
360 | u_int32_t ticket; | |
361 | int i; | |
362 | char r = '\0'; | |
363 | ||
d1ecb069 | 364 | pf_detach_hooks(); |
365 | ||
b0d623f7 | 366 | pf_status.running = 0; |
367 | wakeup(pf_purge_thread_fn); | |
368 | ||
369 | /* clear the rulesets */ | |
370 | for (i = 0; i < PF_RULESET_MAX; i++) | |
371 | if (pf_begin_rules(&ticket, i, &r) == 0) | |
372 | pf_commit_rules(ticket, i, &r); | |
373 | #if ALTQ | |
374 | if (pf_begin_altq(&ticket) == 0) | |
375 | pf_commit_altq(ticket); | |
376 | #endif /* ALTQ */ | |
377 | ||
378 | /* clear states */ | |
379 | RB_FOREACH(state, pf_state_tree_id, &tree_id) { | |
380 | state->timeout = PFTM_PURGE; | |
381 | #if NPFSYNC | |
382 | state->sync_flags = PFSTATE_NOSYNC; | |
383 | #endif | |
384 | } | |
385 | pf_purge_expired_states(pf_status.states); | |
386 | ||
387 | #if NPFSYNC | |
388 | pfsync_clear_states(pf_status.hostid, NULL); | |
389 | #endif | |
390 | ||
391 | /* clear source nodes */ | |
392 | RB_FOREACH(state, pf_state_tree_id, &tree_id) { | |
393 | state->src_node = NULL; | |
394 | state->nat_src_node = NULL; | |
395 | } | |
396 | RB_FOREACH(node, pf_src_tree, &tree_src_tracking) { | |
397 | node->expire = 1; | |
398 | node->states = 0; | |
399 | } | |
400 | pf_purge_expired_src_nodes(); | |
401 | ||
402 | /* clear tables */ | |
403 | memset(&pt, '\0', sizeof (pt)); | |
404 | pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags); | |
405 | ||
406 | /* destroy anchors */ | |
407 | while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) { | |
408 | for (i = 0; i < PF_RULESET_MAX; i++) | |
409 | if (pf_begin_rules(&ticket, i, anchor->name) == 0) | |
410 | pf_commit_rules(ticket, i, anchor->name); | |
411 | } | |
412 | ||
413 | /* destroy main ruleset */ | |
414 | pf_remove_if_empty_ruleset(&pf_main_ruleset); | |
415 | ||
416 | /* destroy the pools */ | |
417 | pool_destroy(&pf_pooladdr_pl); | |
418 | #if ALTQ | |
419 | pool_destroy(&pf_altq_pl); | |
420 | #endif /* ALTQ */ | |
421 | pool_destroy(&pf_state_pl); | |
422 | pool_destroy(&pf_rule_pl); | |
423 | pool_destroy(&pf_src_tree_pl); | |
424 | ||
425 | /* destroy subsystems */ | |
426 | pf_normalize_destroy(); | |
427 | pf_osfp_destroy(); | |
428 | pfr_destroy(); | |
429 | pfi_destroy(); | |
430 | } | |
431 | #endif | |
432 | ||
433 | static int | |
434 | pfopen(dev_t dev, int flags, int fmt, struct proc *p) | |
435 | { | |
436 | #pragma unused(flags, fmt, p) | |
437 | if (minor(dev) >= 1) | |
438 | return (ENXIO); | |
439 | return (0); | |
440 | } | |
441 | ||
442 | static int | |
443 | pfclose(dev_t dev, int flags, int fmt, struct proc *p) | |
444 | { | |
445 | #pragma unused(flags, fmt, p) | |
446 | if (minor(dev) >= 1) | |
447 | return (ENXIO); | |
448 | return (0); | |
449 | } | |
450 | ||
451 | static struct pf_pool * | |
452 | pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, | |
453 | u_int32_t rule_number, u_int8_t r_last, u_int8_t active, | |
454 | u_int8_t check_ticket) | |
455 | { | |
456 | struct pf_ruleset *ruleset; | |
457 | struct pf_rule *rule; | |
458 | int rs_num; | |
459 | ||
460 | ruleset = pf_find_ruleset(anchor); | |
461 | if (ruleset == NULL) | |
462 | return (NULL); | |
463 | rs_num = pf_get_ruleset_number(rule_action); | |
464 | if (rs_num >= PF_RULESET_MAX) | |
465 | return (NULL); | |
466 | if (active) { | |
467 | if (check_ticket && ticket != | |
468 | ruleset->rules[rs_num].active.ticket) | |
469 | return (NULL); | |
470 | if (r_last) | |
471 | rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, | |
472 | pf_rulequeue); | |
473 | else | |
474 | rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
475 | } else { | |
476 | if (check_ticket && ticket != | |
477 | ruleset->rules[rs_num].inactive.ticket) | |
478 | return (NULL); | |
479 | if (r_last) | |
480 | rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, | |
481 | pf_rulequeue); | |
482 | else | |
483 | rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); | |
484 | } | |
485 | if (!r_last) { | |
486 | while ((rule != NULL) && (rule->nr != rule_number)) | |
487 | rule = TAILQ_NEXT(rule, entries); | |
488 | } | |
489 | if (rule == NULL) | |
490 | return (NULL); | |
491 | ||
492 | return (&rule->rpool); | |
493 | } | |
494 | ||
495 | static void | |
496 | pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) | |
497 | { | |
498 | struct pf_pooladdr *mv_pool_pa; | |
499 | ||
500 | while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { | |
501 | TAILQ_REMOVE(poola, mv_pool_pa, entries); | |
502 | TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries); | |
503 | } | |
504 | } | |
505 | ||
506 | static void | |
507 | pf_empty_pool(struct pf_palist *poola) | |
508 | { | |
509 | struct pf_pooladdr *empty_pool_pa; | |
510 | ||
511 | while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { | |
512 | pfi_dynaddr_remove(&empty_pool_pa->addr); | |
513 | pf_tbladdr_remove(&empty_pool_pa->addr); | |
514 | pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE); | |
515 | TAILQ_REMOVE(poola, empty_pool_pa, entries); | |
516 | pool_put(&pf_pooladdr_pl, empty_pool_pa); | |
517 | } | |
518 | } | |
519 | ||
520 | void | |
521 | pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) | |
522 | { | |
523 | if (rulequeue != NULL) { | |
524 | if (rule->states <= 0) { | |
525 | /* | |
526 | * XXX - we need to remove the table *before* detaching | |
527 | * the rule to make sure the table code does not delete | |
528 | * the anchor under our feet. | |
529 | */ | |
530 | pf_tbladdr_remove(&rule->src.addr); | |
531 | pf_tbladdr_remove(&rule->dst.addr); | |
532 | if (rule->overload_tbl) | |
533 | pfr_detach_table(rule->overload_tbl); | |
534 | } | |
535 | TAILQ_REMOVE(rulequeue, rule, entries); | |
536 | rule->entries.tqe_prev = NULL; | |
537 | rule->nr = -1; | |
538 | } | |
539 | ||
540 | if (rule->states > 0 || rule->src_nodes > 0 || | |
541 | rule->entries.tqe_prev != NULL) | |
542 | return; | |
543 | pf_tag_unref(rule->tag); | |
544 | pf_tag_unref(rule->match_tag); | |
545 | #if ALTQ | |
546 | if (rule->pqid != rule->qid) | |
547 | pf_qid_unref(rule->pqid); | |
548 | pf_qid_unref(rule->qid); | |
549 | #endif /* ALTQ */ | |
550 | pf_rtlabel_remove(&rule->src.addr); | |
551 | pf_rtlabel_remove(&rule->dst.addr); | |
552 | pfi_dynaddr_remove(&rule->src.addr); | |
553 | pfi_dynaddr_remove(&rule->dst.addr); | |
554 | if (rulequeue == NULL) { | |
555 | pf_tbladdr_remove(&rule->src.addr); | |
556 | pf_tbladdr_remove(&rule->dst.addr); | |
557 | if (rule->overload_tbl) | |
558 | pfr_detach_table(rule->overload_tbl); | |
559 | } | |
560 | pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); | |
561 | pf_anchor_remove(rule); | |
562 | pf_empty_pool(&rule->rpool.list); | |
563 | pool_put(&pf_rule_pl, rule); | |
564 | } | |
565 | ||
566 | static u_int16_t | |
567 | tagname2tag(struct pf_tags *head, char *tagname) | |
568 | { | |
569 | struct pf_tagname *tag, *p = NULL; | |
570 | u_int16_t new_tagid = 1; | |
571 | ||
572 | TAILQ_FOREACH(tag, head, entries) | |
573 | if (strcmp(tagname, tag->name) == 0) { | |
574 | tag->ref++; | |
575 | return (tag->tag); | |
576 | } | |
577 | ||
578 | /* | |
579 | * to avoid fragmentation, we do a linear search from the beginning | |
580 | * and take the first free slot we find. if there is none or the list | |
581 | * is empty, append a new entry at the end. | |
582 | */ | |
583 | ||
584 | /* new entry */ | |
585 | if (!TAILQ_EMPTY(head)) | |
586 | for (p = TAILQ_FIRST(head); p != NULL && | |
587 | p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) | |
588 | new_tagid = p->tag + 1; | |
589 | ||
590 | if (new_tagid > TAGID_MAX) | |
591 | return (0); | |
592 | ||
593 | /* allocate and fill new struct pf_tagname */ | |
594 | tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO); | |
595 | if (tag == NULL) | |
596 | return (0); | |
597 | strlcpy(tag->name, tagname, sizeof (tag->name)); | |
598 | tag->tag = new_tagid; | |
599 | tag->ref++; | |
600 | ||
601 | if (p != NULL) /* insert new entry before p */ | |
602 | TAILQ_INSERT_BEFORE(p, tag, entries); | |
603 | else /* either list empty or no free slot in between */ | |
604 | TAILQ_INSERT_TAIL(head, tag, entries); | |
605 | ||
606 | return (tag->tag); | |
607 | } | |
608 | ||
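
`tagname2tag()` above allocates tag IDs by walking the sorted tag list and taking the first unused value, as its in-code comment explains. A small stand-alone sketch of just that allocation step, with a hypothetical `first_free_tag()` helper and a plain array in place of the `TAILQ`:

```c
#include <stdio.h>

/*
 * Return the first unused tag ID >= 1, or 0 if the space (1..max) is full.
 * 'used' must be sorted ascending, mirroring the ordering the TAILQ keeps.
 */
static unsigned short first_free_tag(const unsigned short *used, int n,
    unsigned short max)
{
	unsigned short candidate = 1;

	for (int i = 0; i < n && used[i] == candidate; i++)
		candidate = used[i] + 1;

	return (candidate > max) ? 0 : candidate;
}

int main(void)
{
	unsigned short used[] = { 1, 2, 4, 5 };

	/* Prints 3: the first gap, which keeps the tag space dense. */
	printf("next tag: %d\n", first_free_tag(used, 4, 50000));
	return 0;
}
```
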
609 | static void | |
610 | tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) | |
611 | { | |
612 | struct pf_tagname *tag; | |
613 | ||
614 | TAILQ_FOREACH(tag, head, entries) | |
615 | if (tag->tag == tagid) { | |
616 | strlcpy(p, tag->name, PF_TAG_NAME_SIZE); | |
617 | return; | |
618 | } | |
619 | } | |
620 | ||
621 | static void | |
622 | tag_unref(struct pf_tags *head, u_int16_t tag) | |
623 | { | |
624 | struct pf_tagname *p, *next; | |
625 | ||
626 | if (tag == 0) | |
627 | return; | |
628 | ||
629 | for (p = TAILQ_FIRST(head); p != NULL; p = next) { | |
630 | next = TAILQ_NEXT(p, entries); | |
631 | if (tag == p->tag) { | |
632 | if (--p->ref == 0) { | |
633 | TAILQ_REMOVE(head, p, entries); | |
634 | _FREE(p, M_TEMP); | |
635 | } | |
636 | break; | |
637 | } | |
638 | } | |
639 | } | |
640 | ||
641 | u_int16_t | |
642 | pf_tagname2tag(char *tagname) | |
643 | { | |
644 | return (tagname2tag(&pf_tags, tagname)); | |
645 | } | |
646 | ||
647 | void | |
648 | pf_tag2tagname(u_int16_t tagid, char *p) | |
649 | { | |
650 | tag2tagname(&pf_tags, tagid, p); | |
651 | } | |
652 | ||
653 | void | |
654 | pf_tag_ref(u_int16_t tag) | |
655 | { | |
656 | struct pf_tagname *t; | |
657 | ||
658 | TAILQ_FOREACH(t, &pf_tags, entries) | |
659 | if (t->tag == tag) | |
660 | break; | |
661 | if (t != NULL) | |
662 | t->ref++; | |
663 | } | |
664 | ||
665 | void | |
666 | pf_tag_unref(u_int16_t tag) | |
667 | { | |
668 | tag_unref(&pf_tags, tag); | |
669 | } | |
670 | ||
671 | static int | |
672 | pf_rtlabel_add(struct pf_addr_wrap *a) | |
673 | { | |
674 | #pragma unused(a) | |
675 | return (0); | |
676 | } | |
677 | ||
678 | static void | |
679 | pf_rtlabel_remove(struct pf_addr_wrap *a) | |
680 | { | |
681 | #pragma unused(a) | |
682 | } | |
683 | ||
684 | static void | |
685 | pf_rtlabel_copyout(struct pf_addr_wrap *a) | |
686 | { | |
687 | #pragma unused(a) | |
688 | } | |
689 | ||
690 | #if ALTQ | |
691 | u_int32_t | |
692 | pf_qname2qid(char *qname) | |
693 | { | |
694 | return ((u_int32_t)tagname2tag(&pf_qids, qname)); | |
695 | } | |
696 | ||
697 | void | |
698 | pf_qid2qname(u_int32_t qid, char *p) | |
699 | { | |
700 | tag2tagname(&pf_qids, (u_int16_t)qid, p); | |
701 | } | |
702 | ||
703 | void | |
704 | pf_qid_unref(u_int32_t qid) | |
705 | { | |
706 | tag_unref(&pf_qids, (u_int16_t)qid); | |
707 | } | |
708 | ||
709 | static int | |
710 | pf_begin_altq(u_int32_t *ticket) | |
711 | { | |
712 | struct pf_altq *altq; | |
713 | int error = 0; | |
714 | ||
715 | /* Purge the old altq list */ | |
716 | while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { | |
717 | TAILQ_REMOVE(pf_altqs_inactive, altq, entries); | |
718 | if (altq->qname[0] == 0) { | |
719 | /* detach and destroy the discipline */ | |
720 | error = altq_remove(altq); | |
721 | } else | |
722 | pf_qid_unref(altq->qid); | |
723 | pool_put(&pf_altq_pl, altq); | |
724 | } | |
725 | if (error) | |
726 | return (error); | |
727 | *ticket = ++ticket_altqs_inactive; | |
728 | altqs_inactive_open = 1; | |
729 | return (0); | |
730 | } | |
731 | ||
732 | static int | |
733 | pf_rollback_altq(u_int32_t ticket) | |
734 | { | |
735 | struct pf_altq *altq; | |
736 | int error = 0; | |
737 | ||
738 | if (!altqs_inactive_open || ticket != ticket_altqs_inactive) | |
739 | return (0); | |
740 | /* Purge the old altq list */ | |
741 | while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { | |
742 | TAILQ_REMOVE(pf_altqs_inactive, altq, entries); | |
743 | if (altq->qname[0] == 0) { | |
744 | /* detach and destroy the discipline */ | |
745 | error = altq_remove(altq); | |
746 | } else | |
747 | pf_qid_unref(altq->qid); | |
748 | pool_put(&pf_altq_pl, altq); | |
749 | } | |
750 | altqs_inactive_open = 0; | |
751 | return (error); | |
752 | } | |
753 | ||
754 | static int | |
755 | pf_commit_altq(u_int32_t ticket) | |
756 | { | |
757 | struct pf_altqqueue *old_altqs; | |
758 | struct pf_altq *altq; | |
759 | int s, err, error = 0; | |
760 | ||
761 | if (!altqs_inactive_open || ticket != ticket_altqs_inactive) | |
762 | return (EBUSY); | |
763 | ||
764 | /* swap altqs, keep the old. */ | |
765 | s = splnet(); | |
766 | old_altqs = pf_altqs_active; | |
767 | pf_altqs_active = pf_altqs_inactive; | |
768 | pf_altqs_inactive = old_altqs; | |
769 | ticket_altqs_active = ticket_altqs_inactive; | |
770 | ||
771 | /* Attach new disciplines */ | |
772 | TAILQ_FOREACH(altq, pf_altqs_active, entries) { | |
773 | if (altq->qname[0] == 0) { | |
774 | /* attach the discipline */ | |
775 | error = altq_pfattach(altq); | |
776 | if (error == 0 && pf_altq_running) | |
777 | error = pf_enable_altq(altq); | |
778 | if (error != 0) { | |
779 | splx(s); | |
780 | return (error); | |
781 | } | |
782 | } | |
783 | } | |
784 | ||
785 | /* Purge the old altq list */ | |
786 | while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) { | |
787 | TAILQ_REMOVE(pf_altqs_inactive, altq, entries); | |
788 | if (altq->qname[0] == 0) { | |
789 | /* detach and destroy the discipline */ | |
790 | if (pf_altq_running) | |
791 | error = pf_disable_altq(altq); | |
792 | err = altq_pfdetach(altq); | |
793 | if (err != 0 && error == 0) | |
794 | error = err; | |
795 | err = altq_remove(altq); | |
796 | if (err != 0 && error == 0) | |
797 | error = err; | |
798 | } else | |
799 | pf_qid_unref(altq->qid); | |
800 | pool_put(&pf_altq_pl, altq); | |
801 | } | |
802 | splx(s); | |
803 | ||
804 | altqs_inactive_open = 0; | |
805 | return (error); | |
806 | } | |
807 | ||
808 | static int | |
809 | pf_enable_altq(struct pf_altq *altq) | |
810 | { | |
811 | struct ifnet *ifp; | |
812 | struct tb_profile tb; | |
813 | int s, error = 0; | |
814 | ||
815 | if ((ifp = ifunit(altq->ifname)) == NULL) | |
816 | return (EINVAL); | |
817 | ||
818 | if (ifp->if_snd.altq_type != ALTQT_NONE) | |
819 | error = altq_enable(&ifp->if_snd); | |
820 | ||
821 | /* set tokenbucket regulator */ | |
822 | if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { | |
823 | tb.rate = altq->ifbandwidth; | |
824 | tb.depth = altq->tbrsize; | |
825 | s = splnet(); | |
826 | error = tbr_set(&ifp->if_snd, &tb); | |
827 | splx(s); | |
828 | } | |
829 | ||
830 | return (error); | |
831 | } | |
832 | ||
833 | static int | |
834 | pf_disable_altq(struct pf_altq *altq) | |
835 | { | |
836 | struct ifnet *ifp; | |
837 | struct tb_profile tb; | |
838 | int s, error; | |
839 | ||
840 | if ((ifp = ifunit(altq->ifname)) == NULL) | |
841 | return (EINVAL); | |
842 | ||
843 | /* | |
844 | * if the discipline is no longer referenced, it has been overridden | |
845 | * by a new one; in that case, just return. | |
846 | */ | |
847 | if (altq->altq_disc != ifp->if_snd.altq_disc) | |
848 | return (0); | |
849 | ||
850 | error = altq_disable(&ifp->if_snd); | |
851 | ||
852 | if (error == 0) { | |
853 | /* clear tokenbucket regulator */ | |
854 | tb.rate = 0; | |
855 | s = splnet(); | |
856 | error = tbr_set(&ifp->if_snd, &tb); | |
857 | splx(s); | |
858 | } | |
859 | ||
860 | return (error); | |
861 | } | |
862 | #endif /* ALTQ */ | |
863 | ||
864 | static int | |
865 | pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) | |
866 | { | |
867 | struct pf_ruleset *rs; | |
868 | struct pf_rule *rule; | |
869 | ||
870 | if (rs_num < 0 || rs_num >= PF_RULESET_MAX) | |
871 | return (EINVAL); | |
872 | rs = pf_find_or_create_ruleset(anchor); | |
873 | if (rs == NULL) | |
874 | return (EINVAL); | |
875 | while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { | |
876 | pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); | |
877 | rs->rules[rs_num].inactive.rcount--; | |
878 | } | |
879 | *ticket = ++rs->rules[rs_num].inactive.ticket; | |
880 | rs->rules[rs_num].inactive.open = 1; | |
881 | return (0); | |
882 | } | |
883 | ||
884 | static int | |
885 | pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) | |
886 | { | |
887 | struct pf_ruleset *rs; | |
888 | struct pf_rule *rule; | |
889 | ||
890 | if (rs_num < 0 || rs_num >= PF_RULESET_MAX) | |
891 | return (EINVAL); | |
892 | rs = pf_find_ruleset(anchor); | |
893 | if (rs == NULL || !rs->rules[rs_num].inactive.open || | |
894 | rs->rules[rs_num].inactive.ticket != ticket) | |
895 | return (0); | |
896 | while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { | |
897 | pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); | |
898 | rs->rules[rs_num].inactive.rcount--; | |
899 | } | |
900 | rs->rules[rs_num].inactive.open = 0; | |
901 | return (0); | |
902 | } | |
903 | ||
904 | #define PF_MD5_UPD(st, elm) \ | |
905 | MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm)) | |
906 | ||
907 | #define PF_MD5_UPD_STR(st, elm) \ | |
908 | MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm)) | |
909 | ||
910 | #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ | |
911 | (stor) = htonl((st)->elm); \ | |
912 | MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \ | |
913 | } while (0) | |
914 | ||
915 | #define PF_MD5_UPD_HTONS(st, elm, stor) do { \ | |
916 | (stor) = htons((st)->elm); \ | |
917 | MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \ | |
918 | } while (0) | |
919 | ||
920 | #ifndef NO_APPLE_EXTENSIONS | |
921 | static void | |
922 | pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto) | |
923 | #else | |
924 | static void | |
925 | pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr) | |
926 | #endif | |
927 | { | |
928 | PF_MD5_UPD(pfr, addr.type); | |
929 | switch (pfr->addr.type) { | |
930 | case PF_ADDR_DYNIFTL: | |
931 | PF_MD5_UPD(pfr, addr.v.ifname); | |
932 | PF_MD5_UPD(pfr, addr.iflags); | |
933 | break; | |
934 | case PF_ADDR_TABLE: | |
935 | PF_MD5_UPD(pfr, addr.v.tblname); | |
936 | break; | |
937 | case PF_ADDR_ADDRMASK: | |
938 | /* XXX ignore af? */ | |
939 | PF_MD5_UPD(pfr, addr.v.a.addr.addr32); | |
940 | PF_MD5_UPD(pfr, addr.v.a.mask.addr32); | |
941 | break; | |
942 | case PF_ADDR_RTLABEL: | |
943 | PF_MD5_UPD(pfr, addr.v.rtlabelname); | |
944 | break; | |
945 | } | |
946 | ||
947 | #ifndef NO_APPLE_EXTENSIONS | |
948 | switch (proto) { | |
949 | case IPPROTO_TCP: | |
950 | case IPPROTO_UDP: | |
951 | PF_MD5_UPD(pfr, xport.range.port[0]); | |
952 | PF_MD5_UPD(pfr, xport.range.port[1]); | |
953 | PF_MD5_UPD(pfr, xport.range.op); | |
954 | break; | |
955 | ||
956 | default: | |
957 | break; | |
958 | } | |
959 | ||
960 | PF_MD5_UPD(pfr, neg); | |
961 | #else | |
962 | PF_MD5_UPD(pfr, port[0]); | |
963 | PF_MD5_UPD(pfr, port[1]); | |
964 | PF_MD5_UPD(pfr, neg); | |
965 | PF_MD5_UPD(pfr, port_op); | |
966 | #endif | |
967 | } | |
968 | ||
969 | static void | |
970 | pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) | |
971 | { | |
972 | u_int16_t x; | |
973 | u_int32_t y; | |
974 | ||
975 | #ifndef NO_APPLE_EXTENSIONS | |
976 | pf_hash_rule_addr(ctx, &rule->src, rule->proto); | |
977 | pf_hash_rule_addr(ctx, &rule->dst, rule->proto); | |
978 | #else | |
979 | pf_hash_rule_addr(ctx, &rule->src); | |
980 | pf_hash_rule_addr(ctx, &rule->dst); | |
981 | #endif | |
982 | PF_MD5_UPD_STR(rule, label); | |
983 | PF_MD5_UPD_STR(rule, ifname); | |
984 | PF_MD5_UPD_STR(rule, match_tagname); | |
985 | PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */ | |
986 | PF_MD5_UPD_HTONL(rule, os_fingerprint, y); | |
987 | PF_MD5_UPD_HTONL(rule, prob, y); | |
988 | PF_MD5_UPD_HTONL(rule, uid.uid[0], y); | |
989 | PF_MD5_UPD_HTONL(rule, uid.uid[1], y); | |
990 | PF_MD5_UPD(rule, uid.op); | |
991 | PF_MD5_UPD_HTONL(rule, gid.gid[0], y); | |
992 | PF_MD5_UPD_HTONL(rule, gid.gid[1], y); | |
993 | PF_MD5_UPD(rule, gid.op); | |
994 | PF_MD5_UPD_HTONL(rule, rule_flag, y); | |
995 | PF_MD5_UPD(rule, action); | |
996 | PF_MD5_UPD(rule, direction); | |
997 | PF_MD5_UPD(rule, af); | |
998 | PF_MD5_UPD(rule, quick); | |
999 | PF_MD5_UPD(rule, ifnot); | |
1000 | PF_MD5_UPD(rule, match_tag_not); | |
1001 | PF_MD5_UPD(rule, natpass); | |
1002 | PF_MD5_UPD(rule, keep_state); | |
1003 | PF_MD5_UPD(rule, proto); | |
1004 | PF_MD5_UPD(rule, type); | |
1005 | PF_MD5_UPD(rule, code); | |
1006 | PF_MD5_UPD(rule, flags); | |
1007 | PF_MD5_UPD(rule, flagset); | |
1008 | PF_MD5_UPD(rule, allow_opts); | |
1009 | PF_MD5_UPD(rule, rt); | |
1010 | PF_MD5_UPD(rule, tos); | |
1011 | } | |
1012 | ||
1013 | static int | |
1014 | pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) | |
1015 | { | |
1016 | struct pf_ruleset *rs; | |
1017 | struct pf_rule *rule, **old_array; | |
1018 | struct pf_rulequeue *old_rules; | |
1019 | int error; | |
1020 | u_int32_t old_rcount; | |
1021 | ||
1022 | lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED); | |
1023 | ||
1024 | if (rs_num < 0 || rs_num >= PF_RULESET_MAX) | |
1025 | return (EINVAL); | |
1026 | rs = pf_find_ruleset(anchor); | |
1027 | if (rs == NULL || !rs->rules[rs_num].inactive.open || | |
1028 | ticket != rs->rules[rs_num].inactive.ticket) | |
1029 | return (EBUSY); | |
1030 | ||
1031 | /* Calculate checksum for the main ruleset */ | |
1032 | if (rs == &pf_main_ruleset) { | |
1033 | error = pf_setup_pfsync_matching(rs); | |
1034 | if (error != 0) | |
1035 | return (error); | |
1036 | } | |
1037 | ||
1038 | /* Swap rules, keep the old. */ | |
1039 | old_rules = rs->rules[rs_num].active.ptr; | |
1040 | old_rcount = rs->rules[rs_num].active.rcount; | |
1041 | old_array = rs->rules[rs_num].active.ptr_array; | |
1042 | ||
1043 | rs->rules[rs_num].active.ptr = | |
1044 | rs->rules[rs_num].inactive.ptr; | |
1045 | rs->rules[rs_num].active.ptr_array = | |
1046 | rs->rules[rs_num].inactive.ptr_array; | |
1047 | rs->rules[rs_num].active.rcount = | |
1048 | rs->rules[rs_num].inactive.rcount; | |
1049 | rs->rules[rs_num].inactive.ptr = old_rules; | |
1050 | rs->rules[rs_num].inactive.ptr_array = old_array; | |
1051 | rs->rules[rs_num].inactive.rcount = old_rcount; | |
1052 | ||
1053 | rs->rules[rs_num].active.ticket = | |
1054 | rs->rules[rs_num].inactive.ticket; | |
1055 | pf_calc_skip_steps(rs->rules[rs_num].active.ptr); | |
1056 | ||
1057 | ||
1058 | /* Purge the old rule list. */ | |
1059 | while ((rule = TAILQ_FIRST(old_rules)) != NULL) | |
1060 | pf_rm_rule(old_rules, rule); | |
1061 | if (rs->rules[rs_num].inactive.ptr_array) | |
1062 | _FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP); | |
1063 | rs->rules[rs_num].inactive.ptr_array = NULL; | |
1064 | rs->rules[rs_num].inactive.rcount = 0; | |
1065 | rs->rules[rs_num].inactive.open = 0; | |
1066 | pf_remove_if_empty_ruleset(rs); | |
1067 | return (0); | |
1068 | } | |
1069 | ||
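
`pf_begin_rules()` and `pf_commit_rules()` above implement a ticketed two-buffer scheme: new rules are staged into an inactive set, and commit swaps the active and inactive sets under the pf lock, so the active ruleset is always either the complete old one or the complete new one. A reduced sketch of that begin/commit flow, using hypothetical `rules_pair`/`rule_set` types that track only counts and tickets:

```c
#include <stdio.h>

struct rule_set { int rcount; unsigned int ticket; int open; };

struct rules_pair {
	struct rule_set active;
	struct rule_set inactive;
};

/* Begin: empty the staging set and hand out a ticket for the later commit. */
static unsigned int begin_rules(struct rules_pair *rp)
{
	rp->inactive.rcount = 0;
	rp->inactive.open = 1;
	return ++rp->inactive.ticket;
}

/*
 * Commit: only with the matching ticket; swapping the sets means readers see
 * either the complete old ruleset or the complete new one, never a mix.
 */
static int commit_rules(struct rules_pair *rp, unsigned int ticket)
{
	if (!rp->inactive.open || ticket != rp->inactive.ticket)
		return -1;	/* EBUSY in the real code */

	struct rule_set old = rp->active;
	rp->active = rp->inactive;
	rp->inactive = old;
	rp->active.ticket = ticket;
	rp->inactive.open = 0;
	return 0;
}

int main(void)
{
	struct rules_pair rp = { { 0, 0, 0 }, { 0, 0, 0 } };
	unsigned int t = begin_rules(&rp);

	rp.inactive.rcount = 3;		/* stage three new rules */
	printf("commit: %d, active rules: %d\n",
	    commit_rules(&rp, t), rp.active.rcount);	/* commit: 0, active rules: 3 */
	return 0;
}
```
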
1070 | static void | |
1071 | pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk, | |
1072 | struct pf_state *s) | |
1073 | { | |
1074 | uint64_t secs = pf_time_second(); | |
1075 | bzero(sp, sizeof (struct pfsync_state)); | |
1076 | ||
1077 | /* copy from state key */ | |
1078 | #ifndef NO_APPLE_EXTENSIONS | |
1079 | sp->lan.addr = sk->lan.addr; | |
1080 | sp->lan.xport = sk->lan.xport; | |
1081 | sp->gwy.addr = sk->gwy.addr; | |
1082 | sp->gwy.xport = sk->gwy.xport; | |
1083 | sp->ext.addr = sk->ext.addr; | |
1084 | sp->ext.xport = sk->ext.xport; | |
1085 | sp->proto_variant = sk->proto_variant; | |
1086 | sp->tag = s->tag; | |
1087 | #else | |
1088 | sp->lan.addr = sk->lan.addr; | |
1089 | sp->lan.port = sk->lan.port; | |
1090 | sp->gwy.addr = sk->gwy.addr; | |
1091 | sp->gwy.port = sk->gwy.port; | |
1092 | sp->ext.addr = sk->ext.addr; | |
1093 | sp->ext.port = sk->ext.port; | |
1094 | #endif | |
1095 | sp->proto = sk->proto; | |
1096 | sp->af = sk->af; | |
1097 | sp->direction = sk->direction; | |
1098 | ||
1099 | /* copy from state */ | |
1100 | memcpy(&sp->id, &s->id, sizeof (sp->id)); | |
1101 | sp->creatorid = s->creatorid; | |
1102 | strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname)); | |
1103 | pf_state_peer_to_pfsync(&s->src, &sp->src); | |
1104 | pf_state_peer_to_pfsync(&s->dst, &sp->dst); | |
1105 | ||
1106 | sp->rule = s->rule.ptr->nr; | |
1107 | sp->nat_rule = (s->nat_rule.ptr == NULL) ? | |
1108 | (unsigned)-1 : s->nat_rule.ptr->nr; | |
1109 | sp->anchor = (s->anchor.ptr == NULL) ? | |
1110 | (unsigned)-1 : s->anchor.ptr->nr; | |
1111 | ||
1112 | pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]); | |
1113 | pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]); | |
1114 | pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]); | |
1115 | pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]); | |
1116 | sp->creation = secs - s->creation; | |
1117 | sp->expire = pf_state_expires(s); | |
1118 | sp->log = s->log; | |
1119 | sp->allow_opts = s->allow_opts; | |
1120 | sp->timeout = s->timeout; | |
1121 | ||
1122 | if (s->src_node) | |
1123 | sp->sync_flags |= PFSYNC_FLAG_SRCNODE; | |
1124 | if (s->nat_src_node) | |
1125 | sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; | |
1126 | ||
1127 | if (sp->expire > secs) | |
1128 | sp->expire -= secs; | |
1129 | else | |
1130 | sp->expire = 0; | |
1131 | ||
1132 | } | |
1133 | ||
1134 | static void | |
1135 | pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk, | |
1136 | struct pf_state *s) | |
1137 | { | |
1138 | /* copy to state key */ | |
1139 | #ifndef NO_APPLE_EXTENSIONS | |
1140 | sk->lan.addr = sp->lan.addr; | |
1141 | sk->lan.xport = sp->lan.xport; | |
1142 | sk->gwy.addr = sp->gwy.addr; | |
1143 | sk->gwy.xport = sp->gwy.xport; | |
1144 | sk->ext.addr = sp->ext.addr; | |
1145 | sk->ext.xport = sp->ext.xport; | |
1146 | sk->proto_variant = sp->proto_variant; | |
1147 | s->tag = sp->tag; | |
1148 | #else | |
1149 | sk->lan.addr = sp->lan.addr; | |
1150 | sk->lan.port = sp->lan.port; | |
1151 | sk->gwy.addr = sp->gwy.addr; | |
1152 | sk->gwy.port = sp->gwy.port; | |
1153 | sk->ext.addr = sp->ext.addr; | |
1154 | sk->ext.port = sp->ext.port; | |
1155 | #endif | |
1156 | sk->proto = sp->proto; | |
1157 | sk->af = sp->af; | |
1158 | sk->direction = sp->direction; | |
1159 | ||
1160 | /* copy to state */ | |
1161 | memcpy(&s->id, &sp->id, sizeof (sp->id)); | |
1162 | s->creatorid = sp->creatorid; | |
1163 | pf_state_peer_from_pfsync(&sp->src, &s->src); | |
1164 | pf_state_peer_from_pfsync(&sp->dst, &s->dst); | |
1165 | ||
1166 | s->rule.ptr = &pf_default_rule; | |
1167 | s->nat_rule.ptr = NULL; | |
1168 | s->anchor.ptr = NULL; | |
1169 | s->rt_kif = NULL; | |
1170 | s->creation = pf_time_second(); | |
1171 | s->expire = pf_time_second(); | |
1172 | if (sp->expire > 0) | |
1173 | s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire; | |
1174 | s->pfsync_time = 0; | |
1175 | s->packets[0] = s->packets[1] = 0; | |
1176 | s->bytes[0] = s->bytes[1] = 0; | |
1177 | } | |
1178 | ||
1179 | static int | |
1180 | pf_setup_pfsync_matching(struct pf_ruleset *rs) | |
1181 | { | |
1182 | MD5_CTX ctx; | |
1183 | struct pf_rule *rule; | |
1184 | int rs_cnt; | |
1185 | u_int8_t digest[PF_MD5_DIGEST_LENGTH]; | |
1186 | ||
1187 | MD5Init(&ctx); | |
1188 | for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { | |
1189 | /* XXX PF_RULESET_SCRUB as well? */ | |
1190 | if (rs_cnt == PF_RULESET_SCRUB) | |
1191 | continue; | |
1192 | ||
1193 | if (rs->rules[rs_cnt].inactive.ptr_array) | |
1194 | _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); | |
1195 | rs->rules[rs_cnt].inactive.ptr_array = NULL; | |
1196 | ||
1197 | if (rs->rules[rs_cnt].inactive.rcount) { | |
1198 | rs->rules[rs_cnt].inactive.ptr_array = | |
1199 | _MALLOC(sizeof (caddr_t) * | |
1200 | rs->rules[rs_cnt].inactive.rcount, | |
1201 | M_TEMP, M_WAITOK); | |
1202 | ||
1203 | if (!rs->rules[rs_cnt].inactive.ptr_array) | |
1204 | return (ENOMEM); | |
1205 | } | |
1206 | ||
1207 | TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, | |
1208 | entries) { | |
1209 | pf_hash_rule(&ctx, rule); | |
1210 | (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; | |
1211 | } | |
1212 | } | |
1213 | ||
1214 | MD5Final(digest, &ctx); | |
1215 | memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum)); | |
1216 | return (0); | |
1217 | } | |
1218 | ||
1219 | static int | |
1220 | pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) | |
1221 | { | |
1222 | #pragma unused(dev) | |
1223 | struct pf_pooladdr *pa = NULL; | |
1224 | struct pf_pool *pool = NULL; | |
1225 | int error = 0; | |
1226 | ||
1227 | if (kauth_cred_issuser(kauth_cred_get()) == 0) | |
1228 | return (EPERM); | |
1229 | ||
1230 | /* XXX keep in sync with switch() below */ | |
1231 | if (securelevel > 1) | |
1232 | switch (cmd) { | |
1233 | case DIOCGETRULES: | |
1234 | case DIOCGETRULE: | |
1235 | case DIOCGETADDRS: | |
1236 | case DIOCGETADDR: | |
1237 | case DIOCGETSTATE: | |
1238 | case DIOCSETSTATUSIF: | |
1239 | case DIOCGETSTATUS: | |
1240 | case DIOCCLRSTATUS: | |
1241 | case DIOCNATLOOK: | |
1242 | case DIOCSETDEBUG: | |
1243 | case DIOCGETSTATES: | |
1244 | case DIOCGETTIMEOUT: | |
1245 | case DIOCCLRRULECTRS: | |
1246 | case DIOCGETLIMIT: | |
1247 | case DIOCGETALTQS: | |
1248 | case DIOCGETALTQ: | |
1249 | case DIOCGETQSTATS: | |
1250 | case DIOCGETRULESETS: | |
1251 | case DIOCGETRULESET: | |
1252 | case DIOCRGETTABLES: | |
1253 | case DIOCRGETTSTATS: | |
1254 | case DIOCRCLRTSTATS: | |
1255 | case DIOCRCLRADDRS: | |
1256 | case DIOCRADDADDRS: | |
1257 | case DIOCRDELADDRS: | |
1258 | case DIOCRSETADDRS: | |
1259 | case DIOCRGETADDRS: | |
1260 | case DIOCRGETASTATS: | |
1261 | case DIOCRCLRASTATS: | |
1262 | case DIOCRTSTADDRS: | |
1263 | case DIOCOSFPGET: | |
1264 | case DIOCGETSRCNODES: | |
1265 | case DIOCCLRSRCNODES: | |
1266 | case DIOCIGETIFACES: | |
1267 | case DIOCSETIFFLAG: | |
1268 | case DIOCCLRIFFLAG: | |
1269 | break; | |
1270 | case DIOCRCLRTABLES: | |
1271 | case DIOCRADDTABLES: | |
1272 | case DIOCRDELTABLES: | |
1273 | case DIOCRSETTFLAGS: | |
1274 | if (((struct pfioc_table *)addr)->pfrio_flags & | |
1275 | PFR_FLAG_DUMMY) | |
1276 | break; /* dummy operation ok */ | |
1277 | return (EPERM); | |
1278 | default: | |
1279 | return (EPERM); | |
1280 | } | |
1281 | ||
1282 | if (!(flags & FWRITE)) | |
1283 | switch (cmd) { | |
1284 | case DIOCSTART: | |
1285 | case DIOCSTOP: | |
1286 | case DIOCGETRULES: | |
1287 | case DIOCGETADDRS: | |
1288 | case DIOCGETADDR: | |
1289 | case DIOCGETSTATE: | |
1290 | case DIOCGETSTATUS: | |
1291 | case DIOCGETSTATES: | |
1292 | case DIOCGETTIMEOUT: | |
1293 | case DIOCGETLIMIT: | |
1294 | case DIOCGETALTQS: | |
1295 | case DIOCGETALTQ: | |
1296 | case DIOCGETQSTATS: | |
1297 | case DIOCGETRULESETS: | |
1298 | case DIOCGETRULESET: | |
1299 | case DIOCNATLOOK: | |
1300 | case DIOCRGETTABLES: | |
1301 | case DIOCRGETTSTATS: | |
1302 | case DIOCRGETADDRS: | |
1303 | case DIOCRGETASTATS: | |
1304 | case DIOCRTSTADDRS: | |
1305 | case DIOCOSFPGET: | |
1306 | case DIOCGETSRCNODES: | |
1307 | case DIOCIGETIFACES: | |
1308 | break; | |
1309 | case DIOCRCLRTABLES: | |
1310 | case DIOCRADDTABLES: | |
1311 | case DIOCRDELTABLES: | |
1312 | case DIOCRCLRTSTATS: | |
1313 | case DIOCRCLRADDRS: | |
1314 | case DIOCRADDADDRS: | |
1315 | case DIOCRDELADDRS: | |
1316 | case DIOCRSETADDRS: | |
1317 | case DIOCRSETTFLAGS: | |
1318 | if (((struct pfioc_table *)addr)->pfrio_flags & | |
1319 | PFR_FLAG_DUMMY) { | |
1320 | flags |= FWRITE; /* need write lock for dummy */ | |
1321 | break; /* dummy operation ok */ | |
1322 | } | |
1323 | return (EACCES); | |
1324 | case DIOCGETRULE: | |
1325 | if (((struct pfioc_rule *)addr)->action == | |
1326 | PF_GET_CLR_CNTR) | |
1327 | return (EACCES); | |
1328 | break; | |
1329 | default: | |
1330 | return (EACCES); | |
1331 | } | |
1332 | ||
1333 | if (flags & FWRITE) | |
1334 | lck_rw_lock_exclusive(pf_perim_lock); | |
1335 | else | |
1336 | lck_rw_lock_shared(pf_perim_lock); | |
1337 | ||
1338 | lck_mtx_lock(pf_lock); | |
1339 | ||
1340 | switch (cmd) { | |
1341 | ||
1342 | case DIOCSTART: | |
1343 | if (pf_status.running) { | |
1344 | error = EEXIST; | |
1345 | } else if (pf_purge_thread == NULL) { | |
1346 | error = ENOMEM; | |
1347 | } else { | |
d1ecb069 | 1348 | pf_is_enabled = 1; |
b0d623f7 | 1349 | pf_status.running = 1; |
b7266188 | 1350 | pf_status.since = pf_calendar_time_second(); |
b0d623f7 | 1351 | if (pf_status.stateid == 0) { |
1352 | pf_status.stateid = pf_time_second(); | |
1353 | pf_status.stateid = pf_status.stateid << 32; | |
1354 | } | |
1355 | mbuf_growth_aggressive(); | |
b0d623f7 | 1356 | wakeup(pf_purge_thread_fn); |
1357 | DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); | |
1358 | } | |
1359 | break; | |
1360 | ||
1361 | case DIOCSTOP: | |
1362 | if (!pf_status.running) { | |
1363 | error = ENOENT; | |
1364 | } else { | |
1365 | mbuf_growth_normal(); | |
b0d623f7 | 1366 | pf_status.running = 0; |
d1ecb069 | 1367 | pf_is_enabled = 0; |
b7266188 | 1368 | pf_status.since = pf_calendar_time_second(); |
b0d623f7 | 1369 | wakeup(pf_purge_thread_fn); |
1370 | DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); | |
1371 | } | |
1372 | break; | |
1373 | ||
1374 | case DIOCADDRULE: { | |
1375 | struct pfioc_rule *pr = (struct pfioc_rule *)addr; | |
1376 | struct pf_ruleset *ruleset; | |
1377 | struct pf_rule *rule, *tail; | |
1378 | struct pf_pooladdr *apa; | |
1379 | int rs_num; | |
1380 | ||
1381 | pr->anchor[sizeof (pr->anchor) - 1] = 0; | |
1382 | ruleset = pf_find_ruleset(pr->anchor); | |
1383 | if (ruleset == NULL) { | |
1384 | error = EINVAL; | |
1385 | break; | |
1386 | } | |
1387 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
1388 | if (rs_num >= PF_RULESET_MAX) { | |
1389 | error = EINVAL; | |
1390 | break; | |
1391 | } | |
1392 | if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
1393 | error = EINVAL; | |
1394 | break; | |
1395 | } | |
1396 | if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { | |
1397 | error = EBUSY; | |
1398 | break; | |
1399 | } | |
1400 | if (pr->pool_ticket != ticket_pabuf) { | |
1401 | error = EBUSY; | |
1402 | break; | |
1403 | } | |
1404 | rule = pool_get(&pf_rule_pl, PR_WAITOK); | |
1405 | if (rule == NULL) { | |
1406 | error = ENOMEM; | |
1407 | break; | |
1408 | } | |
1409 | bcopy(&pr->rule, rule, sizeof (struct pf_rule)); | |
1410 | rule->cuid = kauth_cred_getuid(p->p_ucred); | |
1411 | rule->cpid = p->p_pid; | |
1412 | rule->anchor = NULL; | |
1413 | rule->kif = NULL; | |
1414 | TAILQ_INIT(&rule->rpool.list); | |
1415 | /* initialize refcounting */ | |
1416 | rule->states = 0; | |
1417 | rule->src_nodes = 0; | |
1418 | rule->entries.tqe_prev = NULL; | |
1419 | #if !INET | |
1420 | if (rule->af == AF_INET) { | |
1421 | pool_put(&pf_rule_pl, rule); | |
1422 | error = EAFNOSUPPORT; | |
1423 | break; | |
1424 | } | |
1425 | #endif /* INET */ | |
1426 | #if !INET6 | |
1427 | if (rule->af == AF_INET6) { | |
1428 | pool_put(&pf_rule_pl, rule); | |
1429 | error = EAFNOSUPPORT; | |
1430 | break; | |
1431 | } | |
1432 | #endif /* INET6 */ | |
1433 | tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, | |
1434 | pf_rulequeue); | |
1435 | if (tail) | |
1436 | rule->nr = tail->nr + 1; | |
1437 | else | |
1438 | rule->nr = 0; | |
1439 | if (rule->ifname[0]) { | |
1440 | rule->kif = pfi_kif_get(rule->ifname); | |
1441 | if (rule->kif == NULL) { | |
1442 | pool_put(&pf_rule_pl, rule); | |
1443 | error = EINVAL; | |
1444 | break; | |
1445 | } | |
1446 | pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); | |
1447 | } | |
1448 | ||
1449 | #if ALTQ | |
1450 | /* set queue IDs */ | |
1451 | if (rule->qname[0] != 0) { | |
1452 | if ((rule->qid = pf_qname2qid(rule->qname)) == 0) | |
1453 | error = EBUSY; | |
1454 | else if (rule->pqname[0] != 0) { | |
1455 | if ((rule->pqid = | |
1456 | pf_qname2qid(rule->pqname)) == 0) | |
1457 | error = EBUSY; | |
1458 | } else | |
1459 | rule->pqid = rule->qid; | |
1460 | } | |
1461 | #endif /* ALTQ */ | |
1462 | if (rule->tagname[0]) | |
1463 | if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) | |
1464 | error = EBUSY; | |
1465 | if (rule->match_tagname[0]) | |
1466 | if ((rule->match_tag = | |
1467 | pf_tagname2tag(rule->match_tagname)) == 0) | |
1468 | error = EBUSY; | |
1469 | if (rule->rt && !rule->direction) | |
1470 | error = EINVAL; | |
1471 | #if PFLOG | |
1472 | if (!rule->log) | |
1473 | rule->logif = 0; | |
1474 | if (rule->logif >= PFLOGIFS_MAX) | |
1475 | error = EINVAL; | |
1476 | #endif /* PFLOG */ | |
1477 | if (pf_rtlabel_add(&rule->src.addr) || | |
1478 | pf_rtlabel_add(&rule->dst.addr)) | |
1479 | error = EBUSY; | |
1480 | if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) | |
1481 | error = EINVAL; | |
1482 | if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) | |
1483 | error = EINVAL; | |
1484 | if (pf_tbladdr_setup(ruleset, &rule->src.addr)) | |
1485 | error = EINVAL; | |
1486 | if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) | |
1487 | error = EINVAL; | |
1488 | if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) | |
1489 | error = EINVAL; | |
1490 | TAILQ_FOREACH(apa, &pf_pabuf, entries) | |
1491 | if (pf_tbladdr_setup(ruleset, &apa->addr)) | |
1492 | error = EINVAL; | |
1493 | ||
1494 | if (rule->overload_tblname[0]) { | |
1495 | if ((rule->overload_tbl = pfr_attach_table(ruleset, | |
1496 | rule->overload_tblname)) == NULL) | |
1497 | error = EINVAL; | |
1498 | else | |
1499 | rule->overload_tbl->pfrkt_flags |= | |
1500 | PFR_TFLAG_ACTIVE; | |
1501 | } | |
1502 | ||
1503 | pf_mv_pool(&pf_pabuf, &rule->rpool.list); | |
1504 | if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || | |
1505 | (rule->action == PF_BINAT)) && rule->anchor == NULL) || | |
1506 | (rule->rt > PF_FASTROUTE)) && | |
1507 | (TAILQ_FIRST(&rule->rpool.list) == NULL)) | |
1508 | error = EINVAL; | |
1509 | ||
1510 | if (error) { | |
1511 | pf_rm_rule(NULL, rule); | |
1512 | break; | |
1513 | } | |
1514 | rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); | |
1515 | rule->evaluations = rule->packets[0] = rule->packets[1] = | |
1516 | rule->bytes[0] = rule->bytes[1] = 0; | |
1517 | TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, | |
1518 | rule, entries); | |
1519 | ruleset->rules[rs_num].inactive.rcount++; | |
1520 | break; | |
1521 | } | |
1522 | ||
1523 | case DIOCGETRULES: { | |
1524 | struct pfioc_rule *pr = (struct pfioc_rule *)addr; | |
1525 | struct pf_ruleset *ruleset; | |
1526 | struct pf_rule *tail; | |
1527 | int rs_num; | |
1528 | ||
1529 | pr->anchor[sizeof (pr->anchor) - 1] = 0; | |
1530 | ruleset = pf_find_ruleset(pr->anchor); | |
1531 | if (ruleset == NULL) { | |
1532 | error = EINVAL; | |
1533 | break; | |
1534 | } | |
1535 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
1536 | if (rs_num >= PF_RULESET_MAX) { | |
1537 | error = EINVAL; | |
1538 | break; | |
1539 | } | |
1540 | tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, | |
1541 | pf_rulequeue); | |
1542 | if (tail) | |
1543 | pr->nr = tail->nr + 1; | |
1544 | else | |
1545 | pr->nr = 0; | |
1546 | pr->ticket = ruleset->rules[rs_num].active.ticket; | |
1547 | break; | |
1548 | } | |
1549 | ||
1550 | case DIOCGETRULE: { | |
1551 | struct pfioc_rule *pr = (struct pfioc_rule *)addr; | |
1552 | struct pf_ruleset *ruleset; | |
1553 | struct pf_rule *rule; | |
1554 | int rs_num, i; | |
1555 | ||
1556 | pr->anchor[sizeof (pr->anchor) - 1] = 0; | |
1557 | ruleset = pf_find_ruleset(pr->anchor); | |
1558 | if (ruleset == NULL) { | |
1559 | error = EINVAL; | |
1560 | break; | |
1561 | } | |
1562 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
1563 | if (rs_num >= PF_RULESET_MAX) { | |
1564 | error = EINVAL; | |
1565 | break; | |
1566 | } | |
1567 | if (pr->ticket != ruleset->rules[rs_num].active.ticket) { | |
1568 | error = EBUSY; | |
1569 | break; | |
1570 | } | |
1571 | rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
1572 | while ((rule != NULL) && (rule->nr != pr->nr)) | |
1573 | rule = TAILQ_NEXT(rule, entries); | |
1574 | if (rule == NULL) { | |
1575 | error = EBUSY; | |
1576 | break; | |
1577 | } | |
1578 | bcopy(rule, &pr->rule, sizeof (struct pf_rule)); | |
1579 | if (pf_anchor_copyout(ruleset, rule, pr)) { | |
1580 | error = EBUSY; | |
1581 | break; | |
1582 | } | |
1583 | pfi_dynaddr_copyout(&pr->rule.src.addr); | |
1584 | pfi_dynaddr_copyout(&pr->rule.dst.addr); | |
1585 | pf_tbladdr_copyout(&pr->rule.src.addr); | |
1586 | pf_tbladdr_copyout(&pr->rule.dst.addr); | |
1587 | pf_rtlabel_copyout(&pr->rule.src.addr); | |
1588 | pf_rtlabel_copyout(&pr->rule.dst.addr); | |
1589 | for (i = 0; i < PF_SKIP_COUNT; ++i) | |
1590 | if (rule->skip[i].ptr == NULL) | |
1591 | pr->rule.skip[i].nr = -1; | |
1592 | else | |
1593 | pr->rule.skip[i].nr = | |
1594 | rule->skip[i].ptr->nr; | |
1595 | ||
1596 | if (pr->action == PF_GET_CLR_CNTR) { | |
1597 | rule->evaluations = 0; | |
1598 | rule->packets[0] = rule->packets[1] = 0; | |
1599 | rule->bytes[0] = rule->bytes[1] = 0; | |
1600 | } | |
1601 | break; | |
1602 | } | |
1603 | ||
1604 | case DIOCCHANGERULE: { | |
1605 | struct pfioc_rule *pcr = (struct pfioc_rule *)addr; | |
1606 | struct pf_ruleset *ruleset; | |
1607 | struct pf_rule *oldrule = NULL, *newrule = NULL; | |
1608 | u_int32_t nr = 0; | |
1609 | int rs_num; | |
1610 | ||
1611 | if (!(pcr->action == PF_CHANGE_REMOVE || | |
1612 | pcr->action == PF_CHANGE_GET_TICKET) && | |
1613 | pcr->pool_ticket != ticket_pabuf) { | |
1614 | error = EBUSY; | |
1615 | break; | |
1616 | } | |
1617 | ||
1618 | if (pcr->action < PF_CHANGE_ADD_HEAD || | |
1619 | pcr->action > PF_CHANGE_GET_TICKET) { | |
1620 | error = EINVAL; | |
1621 | break; | |
1622 | } | |
1623 | ruleset = pf_find_ruleset(pcr->anchor); | |
1624 | if (ruleset == NULL) { | |
1625 | error = EINVAL; | |
1626 | break; | |
1627 | } | |
1628 | rs_num = pf_get_ruleset_number(pcr->rule.action); | |
1629 | if (rs_num >= PF_RULESET_MAX) { | |
1630 | error = EINVAL; | |
1631 | break; | |
1632 | } | |
1633 | ||
1634 | if (pcr->action == PF_CHANGE_GET_TICKET) { | |
1635 | pcr->ticket = ++ruleset->rules[rs_num].active.ticket; | |
1636 | break; | |
1637 | } else { | |
1638 | if (pcr->ticket != | |
1639 | ruleset->rules[rs_num].active.ticket) { | |
1640 | error = EINVAL; | |
1641 | break; | |
1642 | } | |
1643 | if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
1644 | error = EINVAL; | |
1645 | break; | |
1646 | } | |
1647 | } | |
1648 | ||
1649 | if (pcr->action != PF_CHANGE_REMOVE) { | |
1650 | newrule = pool_get(&pf_rule_pl, PR_WAITOK); | |
1651 | if (newrule == NULL) { | |
1652 | error = ENOMEM; | |
1653 | break; | |
1654 | } | |
1655 | bcopy(&pcr->rule, newrule, sizeof (struct pf_rule)); | |
1656 | newrule->cuid = kauth_cred_getuid(p->p_ucred); | |
1657 | newrule->cpid = p->p_pid; | |
1658 | TAILQ_INIT(&newrule->rpool.list); | |
1659 | /* initialize refcounting */ | |
1660 | newrule->states = 0; | |
1661 | newrule->entries.tqe_prev = NULL; | |
1662 | #if !INET | |
1663 | if (newrule->af == AF_INET) { | |
1664 | pool_put(&pf_rule_pl, newrule); | |
1665 | error = EAFNOSUPPORT; | |
1666 | break; | |
1667 | } | |
1668 | #endif /* INET */ | |
1669 | #if !INET6 | |
1670 | if (newrule->af == AF_INET6) { | |
1671 | pool_put(&pf_rule_pl, newrule); | |
1672 | error = EAFNOSUPPORT; | |
1673 | break; | |
1674 | } | |
1675 | #endif /* INET6 */ | |
1676 | if (newrule->ifname[0]) { | |
1677 | newrule->kif = pfi_kif_get(newrule->ifname); | |
1678 | if (newrule->kif == NULL) { | |
1679 | pool_put(&pf_rule_pl, newrule); | |
1680 | error = EINVAL; | |
1681 | break; | |
1682 | } | |
1683 | pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); | |
1684 | } else | |
1685 | newrule->kif = NULL; | |
1686 | ||
1687 | #if ALTQ | |
1688 | /* set queue IDs */ | |
1689 | if (newrule->qname[0] != 0) { | |
1690 | if ((newrule->qid = | |
1691 | pf_qname2qid(newrule->qname)) == 0) | |
1692 | error = EBUSY; | |
1693 | else if (newrule->pqname[0] != 0) { | |
1694 | if ((newrule->pqid = | |
1695 | pf_qname2qid(newrule->pqname)) == 0) | |
1696 | error = EBUSY; | |
1697 | } else | |
1698 | newrule->pqid = newrule->qid; | |
1699 | } | |
1700 | #endif /* ALTQ */ | |
1701 | if (newrule->tagname[0]) | |
1702 | if ((newrule->tag = | |
1703 | pf_tagname2tag(newrule->tagname)) == 0) | |
1704 | error = EBUSY; | |
1705 | if (newrule->match_tagname[0]) | |
1706 | if ((newrule->match_tag = pf_tagname2tag( | |
1707 | newrule->match_tagname)) == 0) | |
1708 | error = EBUSY; | |
1709 | if (newrule->rt && !newrule->direction) | |
1710 | error = EINVAL; | |
1711 | #if PFLOG | |
1712 | if (!newrule->log) | |
1713 | newrule->logif = 0; | |
1714 | if (newrule->logif >= PFLOGIFS_MAX) | |
1715 | error = EINVAL; | |
1716 | #endif /* PFLOG */ | |
1717 | if (pf_rtlabel_add(&newrule->src.addr) || | |
1718 | pf_rtlabel_add(&newrule->dst.addr)) | |
1719 | error = EBUSY; | |
1720 | if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) | |
1721 | error = EINVAL; | |
1722 | if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) | |
1723 | error = EINVAL; | |
1724 | if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) | |
1725 | error = EINVAL; | |
1726 | if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) | |
1727 | error = EINVAL; | |
1728 | if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) | |
1729 | error = EINVAL; | |
1730 | TAILQ_FOREACH(pa, &pf_pabuf, entries) | |
1731 | if (pf_tbladdr_setup(ruleset, &pa->addr)) | |
1732 | error = EINVAL; | |
1733 | ||
1734 | if (newrule->overload_tblname[0]) { | |
1735 | if ((newrule->overload_tbl = pfr_attach_table( | |
1736 | ruleset, newrule->overload_tblname)) == | |
1737 | NULL) | |
1738 | error = EINVAL; | |
1739 | else | |
1740 | newrule->overload_tbl->pfrkt_flags |= | |
1741 | PFR_TFLAG_ACTIVE; | |
1742 | } | |
1743 | ||
1744 | pf_mv_pool(&pf_pabuf, &newrule->rpool.list); | |
1745 | if (((((newrule->action == PF_NAT) || | |
1746 | (newrule->action == PF_RDR) || | |
1747 | (newrule->action == PF_BINAT) || | |
1748 | (newrule->rt > PF_FASTROUTE)) && | |
1749 | !newrule->anchor)) && | |
1750 | (TAILQ_FIRST(&newrule->rpool.list) == NULL)) | |
1751 | error = EINVAL; | |
1752 | ||
1753 | if (error) { | |
1754 | pf_rm_rule(NULL, newrule); | |
1755 | break; | |
1756 | } | |
1757 | newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); | |
1758 | newrule->evaluations = 0; | |
1759 | newrule->packets[0] = newrule->packets[1] = 0; | |
1760 | newrule->bytes[0] = newrule->bytes[1] = 0; | |
1761 | } | |
1762 | pf_empty_pool(&pf_pabuf); | |
1763 | ||
1764 | if (pcr->action == PF_CHANGE_ADD_HEAD) | |
1765 | oldrule = TAILQ_FIRST( | |
1766 | ruleset->rules[rs_num].active.ptr); | |
1767 | else if (pcr->action == PF_CHANGE_ADD_TAIL) | |
1768 | oldrule = TAILQ_LAST( | |
1769 | ruleset->rules[rs_num].active.ptr, pf_rulequeue); | |
1770 | else { | |
1771 | oldrule = TAILQ_FIRST( | |
1772 | ruleset->rules[rs_num].active.ptr); | |
1773 | while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) | |
1774 | oldrule = TAILQ_NEXT(oldrule, entries); | |
1775 | if (oldrule == NULL) { | |
1776 | if (newrule != NULL) | |
1777 | pf_rm_rule(NULL, newrule); | |
1778 | error = EINVAL; | |
1779 | break; | |
1780 | } | |
1781 | } | |
1782 | ||
1783 | if (pcr->action == PF_CHANGE_REMOVE) { | |
1784 | pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); | |
1785 | ruleset->rules[rs_num].active.rcount--; | |
1786 | } else { | |
1787 | if (oldrule == NULL) | |
1788 | TAILQ_INSERT_TAIL( | |
1789 | ruleset->rules[rs_num].active.ptr, | |
1790 | newrule, entries); | |
1791 | else if (pcr->action == PF_CHANGE_ADD_HEAD || | |
1792 | pcr->action == PF_CHANGE_ADD_BEFORE) | |
1793 | TAILQ_INSERT_BEFORE(oldrule, newrule, entries); | |
1794 | else | |
1795 | TAILQ_INSERT_AFTER( | |
1796 | ruleset->rules[rs_num].active.ptr, | |
1797 | oldrule, newrule, entries); | |
1798 | ruleset->rules[rs_num].active.rcount++; | |
1799 | } | |
1800 | ||
1801 | nr = 0; | |
1802 | TAILQ_FOREACH(oldrule, | |
1803 | ruleset->rules[rs_num].active.ptr, entries) | |
1804 | oldrule->nr = nr++; | |
1805 | ||
1806 | ruleset->rules[rs_num].active.ticket++; | |
1807 | ||
1808 | pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); | |
1809 | pf_remove_if_empty_ruleset(ruleset); | |
1810 | ||
1811 | break; | |
1812 | } | |
1813 | ||
1814 | case DIOCCLRSTATES: { | |
1815 | struct pf_state *s, *nexts; | |
1816 | struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; | |
1817 | int killed = 0; | |
1818 | ||
1819 | for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { | |
1820 | nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); | |
1821 | ||
1822 | if (!psk->psk_ifname[0] || strcmp(psk->psk_ifname, | |
1823 | s->kif->pfik_name) == 0) { | |
1824 | #if NPFSYNC | |
1825 | /* don't send out individual delete messages */ | |
1826 | s->sync_flags = PFSTATE_NOSYNC; | |
1827 | #endif | |
1828 | pf_unlink_state(s); | |
1829 | killed++; | |
1830 | } | |
1831 | } | |
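/* the number of states cleared is returned to userland in psk_af */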
1832 | psk->psk_af = killed; | |
1833 | #if NPFSYNC | |
1834 | pfsync_clear_states(pf_status.hostid, psk->psk_ifname); | |
1835 | #endif | |
1836 | break; | |
1837 | } | |
1838 | ||
1839 | case DIOCKILLSTATES: { | |
1840 | struct pf_state *s, *nexts; | |
1841 | struct pf_state_key *sk; | |
1842 | struct pf_state_host *src, *dst; | |
1843 | struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; | |
1844 | int killed = 0; | |
1845 | ||
1846 | for (s = RB_MIN(pf_state_tree_id, &tree_id); s; | |
1847 | s = nexts) { | |
1848 | nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); | |
1849 | sk = s->state_key; | |
1850 | ||
1851 | if (sk->direction == PF_OUT) { | |
1852 | src = &sk->lan; | |
1853 | dst = &sk->ext; | |
1854 | } else { | |
1855 | src = &sk->ext; | |
1856 | dst = &sk->lan; | |
1857 | } | |
1858 | if ((!psk->psk_af || sk->af == psk->psk_af) && | |
1859 | (!psk->psk_proto || psk->psk_proto == sk->proto) && | |
1860 | PF_MATCHA(psk->psk_src.neg, | |
1861 | &psk->psk_src.addr.v.a.addr, | |
1862 | &psk->psk_src.addr.v.a.mask, | |
1863 | &src->addr, sk->af) && | |
1864 | PF_MATCHA(psk->psk_dst.neg, | |
1865 | &psk->psk_dst.addr.v.a.addr, | |
1866 | &psk->psk_dst.addr.v.a.mask, | |
1867 | &dst->addr, sk->af) && | |
1868 | #ifndef NO_APPLE_EXTENSIONS | |
1869 | (pf_match_xport(psk->psk_proto, | |
1870 | psk->psk_proto_variant, &psk->psk_src.xport, | |
1871 | &src->xport)) && | |
1872 | (pf_match_xport(psk->psk_proto, | |
1873 | psk->psk_proto_variant, &psk->psk_dst.xport, | |
1874 | &dst->xport)) && | |
1875 | #else | |
1876 | (psk->psk_src.port_op == 0 || | |
1877 | pf_match_port(psk->psk_src.port_op, | |
1878 | psk->psk_src.port[0], psk->psk_src.port[1], | |
1879 | src->port)) && | |
1880 | (psk->psk_dst.port_op == 0 || | |
1881 | pf_match_port(psk->psk_dst.port_op, | |
1882 | psk->psk_dst.port[0], psk->psk_dst.port[1], | |
1883 | dst->port)) && | |
1884 | #endif | |
1885 | (!psk->psk_ifname[0] || strcmp(psk->psk_ifname, | |
1886 | s->kif->pfik_name) == 0)) { | |
1887 | #if NPFSYNC | |
1888 | /* send immediate delete of state */ | |
1889 | pfsync_delete_state(s); | |
1890 | s->sync_flags |= PFSTATE_NOSYNC; | |
1891 | #endif | |
1892 | pf_unlink_state(s); | |
1893 | killed++; | |
1894 | } | |
1895 | } | |
1896 | psk->psk_af = killed; | |
1897 | break; | |
1898 | } | |
1899 | ||
1900 | case DIOCADDSTATE: { | |
1901 | struct pfioc_state *ps = (struct pfioc_state *)addr; | |
1902 | struct pfsync_state *sp = &ps->state; | |
1903 | struct pf_state *s; | |
1904 | struct pf_state_key *sk; | |
1905 | struct pfi_kif *kif; | |
1906 | ||
1907 | if (sp->timeout >= PFTM_MAX && | |
1908 | sp->timeout != PFTM_UNTIL_PACKET) { | |
1909 | error = EINVAL; | |
1910 | break; | |
1911 | } | |
1912 | s = pool_get(&pf_state_pl, PR_WAITOK); | |
1913 | if (s == NULL) { | |
1914 | error = ENOMEM; | |
1915 | break; | |
1916 | } | |
1917 | bzero(s, sizeof (struct pf_state)); | |
1918 | if ((sk = pf_alloc_state_key(s)) == NULL) { | |
1919 | pool_put(&pf_state_pl, s); | |
1920 | error = ENOMEM; | |
1921 | break; | |
1922 | } | |
1923 | pf_state_import(sp, sk, s); | |
1924 | kif = pfi_kif_get(sp->ifname); | |
1925 | if (kif == NULL) { | |
1926 | pool_put(&pf_state_pl, s); | |
1927 | pool_put(&pf_state_key_pl, sk); | |
1928 | error = ENOENT; | |
1929 | break; | |
1930 | } | |
1931 | #ifndef NO_APPLE_EXTENSIONS | |
1932 | TAILQ_INIT(&s->unlink_hooks); | |
1933 | s->state_key->app_state = 0; | |
1934 | #endif | |
1935 | if (pf_insert_state(kif, s)) { | |
1936 | pfi_kif_unref(kif, PFI_KIF_REF_NONE); | |
1937 | pool_put(&pf_state_pl, s); | |
1938 | error = EEXIST; | |
1939 | break; | |
1940 | } | |
1941 | pf_default_rule.states++; | |
b7266188 | 1942 | VERIFY(pf_default_rule.states != 0); |
1943 | break; |
1944 | } | |
1945 | ||
1946 | case DIOCGETSTATE: { | |
1947 | struct pfioc_state *ps = (struct pfioc_state *)addr; | |
1948 | struct pf_state *s; | |
1949 | struct pf_state_cmp id_key; | |
1950 | ||
1951 | bcopy(ps->state.id, &id_key.id, sizeof (id_key.id)); | |
1952 | id_key.creatorid = ps->state.creatorid; | |
1953 | ||
1954 | s = pf_find_state_byid(&id_key); | |
1955 | if (s == NULL) { | |
1956 | error = ENOENT; | |
1957 | break; | |
1958 | } | |
1959 | ||
1960 | pf_state_export(&ps->state, s->state_key, s); | |
1961 | break; | |
1962 | } | |
1963 | ||
1964 | case DIOCGETSTATES: { | |
1965 | struct pfioc_states *ps = (struct pfioc_states *)addr; | |
1966 | struct pf_state *state; | |
1967 | struct pfsync_state *pstore; |
1968 | user_addr_t buf; | |
1969 | u_int32_t nr = 0; |
1970 | ||
1971 | if (ps->ps_len == 0) { | |
1972 | nr = pf_status.states; | |
1973 | ps->ps_len = sizeof (struct pfsync_state) * nr; | |
1974 | break; | |
1975 | } | |
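/*
 * Note (editorial): callers typically issue DIOCGETSTATES twice --
 * once with ps_len == 0 to learn the required buffer size (handled
 * above), then again with ps_buf pointing at a buffer of that size.
 * The loop below exports one pfsync_state per state into that buffer
 * and finally rewrites ps_len to the number of bytes actually used.
 */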
1976 | ||
1977 | pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK); | |
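/*
 * Editorial assumption: PF_USER_ADDR() resolves the user-space address
 * of the ps_buf member from the 32- or 64-bit layout of the ioctl
 * argument, so the copyout() below writes straight into the caller's
 * buffer.
 */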
d1ecb069 | 1978 | buf = PF_USER_ADDR(addr, pfioc_states, ps_buf); |
1979 | |
1980 | state = TAILQ_FIRST(&state_list); | |
1981 | while (state) { | |
1982 | if (state->timeout != PFTM_UNLINKED) { | |
1983 | if ((nr + 1) * sizeof (*pstore) > |
1984 | (unsigned)ps->ps_len) | |
1985 | break; |
1986 | ||
1987 | pf_state_export(pstore, | |
1988 | state->state_key, state); | |
d1ecb069 | 1989 | error = copyout(pstore, buf, sizeof (*pstore)); |
1990 | if (error) { |
1991 | _FREE(pstore, M_TEMP); | |
1992 | goto fail; | |
1993 | } | |
d1ecb069 | 1994 | buf += sizeof (*pstore); |
1995 | nr++; |
1996 | } | |
1997 | state = TAILQ_NEXT(state, entry_list); | |
1998 | } | |
1999 | ||
2000 | ps->ps_len = sizeof (struct pfsync_state) * nr; | |
2001 | ||
2002 | _FREE(pstore, M_TEMP); | |
2003 | break; | |
2004 | } | |
2005 | ||
2006 | case DIOCGETSTATUS: { | |
2007 | struct pf_status *s = (struct pf_status *)addr; | |
2008 | bcopy(&pf_status, s, sizeof (struct pf_status)); | |
2009 | pfi_update_status(s->ifname, s); | |
2010 | break; | |
2011 | } | |
2012 | ||
2013 | case DIOCSETSTATUSIF: { | |
2014 | struct pfioc_if *pi = (struct pfioc_if *)addr; | |
2015 | ||
2016 | if (pi->ifname[0] == 0) { | |
2017 | bzero(pf_status.ifname, IFNAMSIZ); | |
2018 | break; | |
2019 | } | |
2020 | strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); | |
2021 | break; | |
2022 | } | |
2023 | ||
2024 | case DIOCCLRSTATUS: { | |
2025 | bzero(pf_status.counters, sizeof (pf_status.counters)); | |
2026 | bzero(pf_status.fcounters, sizeof (pf_status.fcounters)); | |
2027 | bzero(pf_status.scounters, sizeof (pf_status.scounters)); | |
b7266188 | 2028 | pf_status.since = pf_calendar_time_second(); |
2029 | if (*pf_status.ifname) |
2030 | pfi_update_status(pf_status.ifname, NULL); | |
2031 | break; | |
2032 | } | |
2033 | ||
2034 | case DIOCNATLOOK: { | |
2035 | struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; | |
2036 | struct pf_state_key *sk; | |
2037 | struct pf_state *state; | |
2038 | struct pf_state_key_cmp key; | |
2039 | int m = 0, direction = pnl->direction; | |
2040 | ||
2041 | key.af = pnl->af; | |
2042 | key.proto = pnl->proto; | |
2043 | ||
2044 | #ifndef NO_APPLE_EXTENSIONS | |
2045 | key.proto_variant = pnl->proto_variant; | |
2046 | #endif | |
2047 | ||
2048 | if (!pnl->proto || | |
2049 | PF_AZERO(&pnl->saddr, pnl->af) || | |
2050 | PF_AZERO(&pnl->daddr, pnl->af) || | |
2051 | ((pnl->proto == IPPROTO_TCP || | |
2052 | pnl->proto == IPPROTO_UDP) && | |
2053 | #ifndef NO_APPLE_EXTENSIONS | |
2054 | (!pnl->dxport.port || !pnl->sxport.port))) | |
2055 | #else | |
2056 | (!pnl->dport || !pnl->sport))) | |
2057 | #endif | |
2058 | error = EINVAL; | |
2059 | else { | |
2060 | /* | |
2061 | * userland gives us the source and dest of the connection; |
2062 | * reverse the lookup so we ask what happens to the |
2063 | * return traffic, enabling us to find it in the |
2064 | * state tree. | |
2065 | */ | |
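/*
 * Illustrative sketch (not from the original source): a userland
 * caller fills pfioc_natlook with the connection as it observes it,
 * e.g. for an outbound TCP connection and an fd "dev" on /dev/pf:
 *
 *	struct pfioc_natlook pnl;
 *	bzero(&pnl, sizeof (pnl));
 *	pnl.af = AF_INET;
 *	pnl.proto = IPPROTO_TCP;
 *	pnl.direction = PF_OUT;
 *	// fill pnl.saddr/pnl.sxport and pnl.daddr/pnl.dxport here
 *	ioctl(dev, DIOCNATLOOK, &pnl);
 *
 * On success the code below answers with the matching state's
 * endpoints: rsaddr/rsxport (the pre-translation lan side) for PF_IN
 * queries, rdaddr/rdxport (the translated gwy side) for PF_OUT.
 */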
2066 | if (direction == PF_IN) { | |
2067 | PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); | |
2068 | #ifndef NO_APPLE_EXTENSIONS | |
2069 | memcpy(&key.ext.xport, &pnl->dxport, | |
2070 | sizeof (key.ext.xport)); | |
2071 | #else | |
2072 | key.ext.port = pnl->dport; | |
2073 | #endif | |
2074 | PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); | |
2075 | #ifndef NO_APPLE_EXTENSIONS | |
2076 | memcpy(&key.gwy.xport, &pnl->sxport, | |
2077 | sizeof (key.gwy.xport)); | |
2078 | #else | |
2079 | key.gwy.port = pnl->sport; | |
2080 | #endif | |
2081 | state = pf_find_state_all(&key, PF_IN, &m); | |
2082 | } else { | |
2083 | PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); | |
2084 | #ifndef NO_APPLE_EXTENSIONS | |
2085 | memcpy(&key.lan.xport, &pnl->dxport, | |
2086 | sizeof (key.lan.xport)); | |
2087 | #else | |
2088 | key.lan.port = pnl->dport; | |
2089 | #endif | |
2090 | PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); | |
2091 | #ifndef NO_APPLE_EXTENSIONS | |
2092 | memcpy(&key.ext.xport, &pnl->sxport, | |
2093 | sizeof (key.ext.xport)); | |
2094 | #else | |
2095 | key.ext.port = pnl->sport; | |
2096 | #endif | |
2097 | state = pf_find_state_all(&key, PF_OUT, &m); | |
2098 | } | |
2099 | if (m > 1) | |
2100 | error = E2BIG; /* more than one state */ | |
2101 | else if (state != NULL) { | |
2102 | sk = state->state_key; | |
2103 | if (direction == PF_IN) { | |
2104 | PF_ACPY(&pnl->rsaddr, &sk->lan.addr, | |
2105 | sk->af); | |
2106 | #ifndef NO_APPLE_EXTENSIONS | |
2107 | memcpy(&pnl->rsxport, &sk->lan.xport, | |
2108 | sizeof (pnl->rsxport)); | |
2109 | #else | |
2110 | pnl->rsport = sk->lan.port; | |
2111 | #endif | |
2112 | PF_ACPY(&pnl->rdaddr, &pnl->daddr, | |
2113 | pnl->af); | |
2114 | #ifndef NO_APPLE_EXTENSIONS | |
2115 | memcpy(&pnl->rdxport, &pnl->dxport, | |
2116 | sizeof (pnl->rdxport)); | |
2117 | #else | |
2118 | pnl->rdport = pnl->dport; | |
2119 | #endif | |
2120 | } else { | |
2121 | PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, | |
2122 | sk->af); | |
2123 | #ifndef NO_APPLE_EXTENSIONS | |
2124 | memcpy(&pnl->rdxport, &sk->gwy.xport, | |
2125 | sizeof (pnl->rdxport)); | |
2126 | #else | |
2127 | pnl->rdport = sk->gwy.port; | |
2128 | #endif | |
2129 | PF_ACPY(&pnl->rsaddr, &pnl->saddr, | |
2130 | pnl->af); | |
2131 | #ifndef NO_APPLE_EXTENSIONS | |
2132 | memcpy(&pnl->rsxport, &pnl->sxport, | |
2133 | sizeof (pnl->rsxport)); | |
2134 | #else | |
2135 | pnl->rsport = pnl->sport; | |
2136 | #endif | |
2137 | } | |
2138 | } else | |
2139 | error = ENOENT; | |
2140 | } | |
2141 | break; | |
2142 | } | |
2143 | ||
2144 | case DIOCSETTIMEOUT: { | |
2145 | struct pfioc_tm *pt = (struct pfioc_tm *)addr; | |
2146 | int old; | |
2147 | ||
2148 | if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || | |
2149 | pt->seconds < 0) { | |
2150 | error = EINVAL; | |
2151 | goto fail; | |
2152 | } | |
2153 | old = pf_default_rule.timeout[pt->timeout]; | |
2154 | if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) | |
2155 | pt->seconds = 1; | |
2156 | pf_default_rule.timeout[pt->timeout] = pt->seconds; | |
2157 | if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) | |
2158 | wakeup(pf_purge_thread_fn); | |
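/* hand the previous timeout value back to the caller */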
2159 | pt->seconds = old; | |
2160 | break; | |
2161 | } | |
2162 | ||
2163 | case DIOCGETTIMEOUT: { | |
2164 | struct pfioc_tm *pt = (struct pfioc_tm *)addr; | |
2165 | ||
2166 | if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { | |
2167 | error = EINVAL; | |
2168 | goto fail; | |
2169 | } | |
2170 | pt->seconds = pf_default_rule.timeout[pt->timeout]; | |
2171 | break; | |
2172 | } | |
2173 | ||
2174 | case DIOCGETLIMIT: { | |
2175 | struct pfioc_limit *pl = (struct pfioc_limit *)addr; | |
2176 | ||
2177 | if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { | |
2178 | error = EINVAL; | |
2179 | goto fail; | |
2180 | } | |
2181 | pl->limit = pf_pool_limits[pl->index].limit; | |
2182 | break; | |
2183 | } | |
2184 | ||
2185 | case DIOCSETLIMIT: { | |
2186 | struct pfioc_limit *pl = (struct pfioc_limit *)addr; | |
2187 | int old_limit; | |
2188 | ||
2189 | if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || | |
2190 | pf_pool_limits[pl->index].pp == NULL) { | |
2191 | error = EINVAL; | |
2192 | goto fail; | |
2193 | } | |
2194 | pool_sethardlimit(pf_pool_limits[pl->index].pp, | |
2195 | pl->limit, NULL, 0); | |
2196 | old_limit = pf_pool_limits[pl->index].limit; | |
2197 | pf_pool_limits[pl->index].limit = pl->limit; | |
2198 | pl->limit = old_limit; | |
2199 | break; | |
2200 | } | |
2201 | ||
2202 | case DIOCSETDEBUG: { | |
2203 | u_int32_t *level = (u_int32_t *)addr; | |
2204 | ||
2205 | pf_status.debug = *level; | |
2206 | break; | |
2207 | } | |
2208 | ||
2209 | case DIOCCLRRULECTRS: { | |
2210 | /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ | |
2211 | struct pf_ruleset *ruleset = &pf_main_ruleset; | |
2212 | struct pf_rule *rule; | |
2213 | ||
2214 | TAILQ_FOREACH(rule, | |
2215 | ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { | |
2216 | rule->evaluations = 0; | |
2217 | rule->packets[0] = rule->packets[1] = 0; | |
2218 | rule->bytes[0] = rule->bytes[1] = 0; | |
2219 | } | |
2220 | break; | |
2221 | } | |
2222 | ||
2223 | #if ALTQ | |
2224 | case DIOCSTARTALTQ: { | |
2225 | struct pf_altq *altq; | |
2226 | ||
2227 | /* enable all altq interfaces on active list */ | |
2228 | TAILQ_FOREACH(altq, pf_altqs_active, entries) { | |
2229 | if (altq->qname[0] == 0) { | |
2230 | error = pf_enable_altq(altq); | |
2231 | if (error != 0) | |
2232 | break; | |
2233 | } | |
2234 | } | |
2235 | if (error == 0) | |
2236 | pf_altq_running = 1; | |
2237 | DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); | |
2238 | break; | |
2239 | } | |
2240 | ||
2241 | case DIOCSTOPALTQ: { | |
2242 | struct pf_altq *altq; | |
2243 | ||
2244 | /* disable all altq interfaces on active list */ | |
2245 | TAILQ_FOREACH(altq, pf_altqs_active, entries) { | |
2246 | if (altq->qname[0] == 0) { | |
2247 | error = pf_disable_altq(altq); | |
2248 | if (error != 0) | |
2249 | break; | |
2250 | } | |
2251 | } | |
2252 | if (error == 0) | |
2253 | pf_altq_running = 0; | |
2254 | DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); | |
2255 | break; | |
2256 | } | |
2257 | ||
2258 | case DIOCADDALTQ: { | |
2259 | struct pfioc_altq *pa = (struct pfioc_altq *)addr; | |
2260 | struct pf_altq *altq, *a; | |
2261 | ||
2262 | if (pa->ticket != ticket_altqs_inactive) { | |
2263 | error = EBUSY; | |
2264 | break; | |
2265 | } | |
2266 | altq = pool_get(&pf_altq_pl, PR_WAITOK); | |
2267 | if (altq == NULL) { | |
2268 | error = ENOMEM; | |
2269 | break; | |
2270 | } | |
2271 | bcopy(&pa->altq, altq, sizeof (struct pf_altq)); | |
2272 | ||
2273 | /* | |
2274 | * if this is for a queue, find the discipline and | |
2275 | * copy the necessary fields | |
2276 | */ | |
2277 | if (altq->qname[0] != 0) { | |
2278 | if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { | |
2279 | error = EBUSY; | |
2280 | pool_put(&pf_altq_pl, altq); | |
2281 | break; | |
2282 | } | |
2283 | altq->altq_disc = NULL; | |
2284 | TAILQ_FOREACH(a, pf_altqs_inactive, entries) { | |
2285 | if (strncmp(a->ifname, altq->ifname, | |
2286 | IFNAMSIZ) == 0 && a->qname[0] == 0) { | |
2287 | altq->altq_disc = a->altq_disc; | |
2288 | break; | |
2289 | } | |
2290 | } | |
2291 | } | |
2292 | ||
2293 | error = altq_add(altq); | |
2294 | if (error) { | |
2295 | pool_put(&pf_altq_pl, altq); | |
2296 | break; | |
2297 | } | |
2298 | ||
2299 | TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); | |
2300 | bcopy(altq, &pa->altq, sizeof (struct pf_altq)); | |
2301 | break; | |
2302 | } | |
2303 | ||
2304 | case DIOCGETALTQS: { | |
2305 | struct pfioc_altq *pa = (struct pfioc_altq *)addr; | |
2306 | struct pf_altq *altq; | |
2307 | ||
2308 | pa->nr = 0; | |
2309 | TAILQ_FOREACH(altq, pf_altqs_active, entries) | |
2310 | pa->nr++; | |
2311 | pa->ticket = ticket_altqs_active; | |
2312 | break; | |
2313 | } | |
2314 | ||
2315 | case DIOCGETALTQ: { | |
2316 | struct pfioc_altq *pa = (struct pfioc_altq *)addr; | |
2317 | struct pf_altq *altq; | |
2318 | u_int32_t nr; | |
2319 | ||
2320 | if (pa->ticket != ticket_altqs_active) { | |
2321 | error = EBUSY; | |
2322 | break; | |
2323 | } | |
2324 | nr = 0; | |
2325 | altq = TAILQ_FIRST(pf_altqs_active); | |
2326 | while ((altq != NULL) && (nr < pa->nr)) { | |
2327 | altq = TAILQ_NEXT(altq, entries); | |
2328 | nr++; | |
2329 | } | |
2330 | if (altq == NULL) { | |
2331 | error = EBUSY; | |
2332 | break; | |
2333 | } | |
2334 | bcopy(altq, &pa->altq, sizeof (struct pf_altq)); | |
2335 | break; | |
2336 | } | |
2337 | ||
2338 | case DIOCCHANGEALTQ: | |
2339 | /* CHANGEALTQ not supported yet! */ | |
2340 | error = ENODEV; | |
2341 | break; | |
2342 | ||
2343 | case DIOCGETQSTATS: { | |
2344 | struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; | |
2345 | struct pf_altq *altq; | |
2346 | u_int32_t nr; | |
2347 | int nbytes; | |
2348 | ||
2349 | if (pq->ticket != ticket_altqs_active) { | |
2350 | error = EBUSY; | |
2351 | break; | |
2352 | } | |
2353 | nbytes = pq->nbytes; | |
2354 | nr = 0; | |
2355 | altq = TAILQ_FIRST(pf_altqs_active); | |
2356 | while ((altq != NULL) && (nr < pq->nr)) { | |
2357 | altq = TAILQ_NEXT(altq, entries); | |
2358 | nr++; | |
2359 | } | |
2360 | if (altq == NULL) { | |
2361 | error = EBUSY; | |
2362 | break; | |
2363 | } | |
2364 | error = altq_getqstats(altq, pq->buf, &nbytes); | |
2365 | if (error == 0) { | |
2366 | pq->scheduler = altq->scheduler; | |
2367 | pq->nbytes = nbytes; | |
2368 | } | |
2369 | break; | |
2370 | } | |
2371 | #endif /* ALTQ */ | |
2372 | ||
2373 | case DIOCBEGINADDRS: { | |
2374 | struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; | |
2375 | ||
2376 | pf_empty_pool(&pf_pabuf); | |
2377 | pp->ticket = ++ticket_pabuf; | |
2378 | break; | |
2379 | } | |
2380 | ||
2381 | case DIOCADDADDR: { | |
2382 | struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; | |
2383 | ||
2384 | if (pp->ticket != ticket_pabuf) { | |
2385 | error = EBUSY; | |
2386 | break; | |
2387 | } | |
2388 | #if !INET | |
2389 | if (pp->af == AF_INET) { | |
2390 | error = EAFNOSUPPORT; | |
2391 | break; | |
2392 | } | |
2393 | #endif /* INET */ | |
2394 | #if !INET6 | |
2395 | if (pp->af == AF_INET6) { | |
2396 | error = EAFNOSUPPORT; | |
2397 | break; | |
2398 | } | |
2399 | #endif /* INET6 */ | |
2400 | if (pp->addr.addr.type != PF_ADDR_ADDRMASK && | |
2401 | pp->addr.addr.type != PF_ADDR_DYNIFTL && | |
2402 | pp->addr.addr.type != PF_ADDR_TABLE) { | |
2403 | error = EINVAL; | |
2404 | break; | |
2405 | } | |
2406 | pa = pool_get(&pf_pooladdr_pl, PR_WAITOK); | |
2407 | if (pa == NULL) { | |
2408 | error = ENOMEM; | |
2409 | break; | |
2410 | } | |
2411 | bcopy(&pp->addr, pa, sizeof (struct pf_pooladdr)); | |
2412 | if (pa->ifname[0]) { | |
2413 | pa->kif = pfi_kif_get(pa->ifname); | |
2414 | if (pa->kif == NULL) { | |
2415 | pool_put(&pf_pooladdr_pl, pa); | |
2416 | error = EINVAL; | |
2417 | break; | |
2418 | } | |
2419 | pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); | |
2420 | } | |
2421 | if (pfi_dynaddr_setup(&pa->addr, pp->af)) { | |
2422 | pfi_dynaddr_remove(&pa->addr); | |
2423 | pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); | |
2424 | pool_put(&pf_pooladdr_pl, pa); | |
2425 | error = EINVAL; | |
2426 | break; | |
2427 | } | |
2428 | TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); | |
2429 | break; | |
2430 | } | |
2431 | ||
2432 | case DIOCGETADDRS: { | |
2433 | struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; | |
2434 | ||
2435 | pp->nr = 0; | |
2436 | pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, | |
2437 | pp->r_num, 0, 1, 0); | |
2438 | if (pool == NULL) { | |
2439 | error = EBUSY; | |
2440 | break; | |
2441 | } | |
2442 | TAILQ_FOREACH(pa, &pool->list, entries) | |
2443 | pp->nr++; | |
2444 | break; | |
2445 | } | |
2446 | ||
2447 | case DIOCGETADDR: { | |
2448 | struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; | |
2449 | u_int32_t nr = 0; | |
2450 | ||
2451 | pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, | |
2452 | pp->r_num, 0, 1, 1); | |
2453 | if (pool == NULL) { | |
2454 | error = EBUSY; | |
2455 | break; | |
2456 | } | |
2457 | pa = TAILQ_FIRST(&pool->list); | |
2458 | while ((pa != NULL) && (nr < pp->nr)) { | |
2459 | pa = TAILQ_NEXT(pa, entries); | |
2460 | nr++; | |
2461 | } | |
2462 | if (pa == NULL) { | |
2463 | error = EBUSY; | |
2464 | break; | |
2465 | } | |
2466 | bcopy(pa, &pp->addr, sizeof (struct pf_pooladdr)); | |
2467 | pfi_dynaddr_copyout(&pp->addr.addr); | |
2468 | pf_tbladdr_copyout(&pp->addr.addr); | |
2469 | pf_rtlabel_copyout(&pp->addr.addr); | |
2470 | break; | |
2471 | } | |
2472 | ||
2473 | case DIOCCHANGEADDR: { | |
2474 | struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; | |
2475 | struct pf_pooladdr *oldpa = NULL, *newpa = NULL; | |
2476 | struct pf_ruleset *ruleset; | |
2477 | ||
2478 | if (pca->action < PF_CHANGE_ADD_HEAD || | |
2479 | pca->action > PF_CHANGE_REMOVE) { | |
2480 | error = EINVAL; | |
2481 | break; | |
2482 | } | |
2483 | if (pca->addr.addr.type != PF_ADDR_ADDRMASK && | |
2484 | pca->addr.addr.type != PF_ADDR_DYNIFTL && | |
2485 | pca->addr.addr.type != PF_ADDR_TABLE) { | |
2486 | error = EINVAL; | |
2487 | break; | |
2488 | } | |
2489 | ||
2490 | ruleset = pf_find_ruleset(pca->anchor); | |
2491 | if (ruleset == NULL) { | |
2492 | error = EBUSY; | |
2493 | break; | |
2494 | } | |
2495 | pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, | |
2496 | pca->r_num, pca->r_last, 1, 1); | |
2497 | if (pool == NULL) { | |
2498 | error = EBUSY; | |
2499 | break; | |
2500 | } | |
2501 | if (pca->action != PF_CHANGE_REMOVE) { | |
2502 | newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK); | |
2503 | if (newpa == NULL) { | |
2504 | error = ENOMEM; | |
2505 | break; | |
2506 | } | |
2507 | bcopy(&pca->addr, newpa, sizeof (struct pf_pooladdr)); | |
2508 | #if !INET | |
2509 | if (pca->af == AF_INET) { | |
2510 | pool_put(&pf_pooladdr_pl, newpa); | |
2511 | error = EAFNOSUPPORT; | |
2512 | break; | |
2513 | } | |
2514 | #endif /* INET */ | |
2515 | #if !INET6 | |
2516 | if (pca->af == AF_INET6) { | |
2517 | pool_put(&pf_pooladdr_pl, newpa); | |
2518 | error = EAFNOSUPPORT; | |
2519 | break; | |
2520 | } | |
2521 | #endif /* INET6 */ | |
2522 | if (newpa->ifname[0]) { | |
2523 | newpa->kif = pfi_kif_get(newpa->ifname); | |
2524 | if (newpa->kif == NULL) { | |
2525 | pool_put(&pf_pooladdr_pl, newpa); | |
2526 | error = EINVAL; | |
2527 | break; | |
2528 | } | |
2529 | pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); | |
2530 | } else | |
2531 | newpa->kif = NULL; | |
2532 | if (pfi_dynaddr_setup(&newpa->addr, pca->af) || | |
2533 | pf_tbladdr_setup(ruleset, &newpa->addr)) { | |
2534 | pfi_dynaddr_remove(&newpa->addr); | |
2535 | pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); | |
2536 | pool_put(&pf_pooladdr_pl, newpa); | |
2537 | error = EINVAL; | |
2538 | break; | |
2539 | } | |
2540 | } | |
2541 | ||
2542 | if (pca->action == PF_CHANGE_ADD_HEAD) | |
2543 | oldpa = TAILQ_FIRST(&pool->list); | |
2544 | else if (pca->action == PF_CHANGE_ADD_TAIL) | |
2545 | oldpa = TAILQ_LAST(&pool->list, pf_palist); | |
2546 | else { | |
2547 | int i = 0; | |
2548 | ||
2549 | oldpa = TAILQ_FIRST(&pool->list); | |
2550 | while ((oldpa != NULL) && (i < (int)pca->nr)) { | |
2551 | oldpa = TAILQ_NEXT(oldpa, entries); | |
2552 | i++; | |
2553 | } | |
2554 | if (oldpa == NULL) { | |
2555 | error = EINVAL; | |
2556 | break; | |
2557 | } | |
2558 | } | |
2559 | ||
2560 | if (pca->action == PF_CHANGE_REMOVE) { | |
2561 | TAILQ_REMOVE(&pool->list, oldpa, entries); | |
2562 | pfi_dynaddr_remove(&oldpa->addr); | |
2563 | pf_tbladdr_remove(&oldpa->addr); | |
2564 | pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); | |
2565 | pool_put(&pf_pooladdr_pl, oldpa); | |
2566 | } else { | |
2567 | if (oldpa == NULL) | |
2568 | TAILQ_INSERT_TAIL(&pool->list, newpa, entries); | |
2569 | else if (pca->action == PF_CHANGE_ADD_HEAD || | |
2570 | pca->action == PF_CHANGE_ADD_BEFORE) | |
2571 | TAILQ_INSERT_BEFORE(oldpa, newpa, entries); | |
2572 | else | |
2573 | TAILQ_INSERT_AFTER(&pool->list, oldpa, | |
2574 | newpa, entries); | |
2575 | } | |
2576 | ||
2577 | pool->cur = TAILQ_FIRST(&pool->list); | |
2578 | PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, | |
2579 | pca->af); | |
2580 | break; | |
2581 | } | |
2582 | ||
2583 | case DIOCGETRULESETS: { | |
2584 | struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; | |
2585 | struct pf_ruleset *ruleset; | |
2586 | struct pf_anchor *anchor; | |
2587 | ||
2588 | pr->path[sizeof (pr->path) - 1] = 0; | |
2589 | if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { | |
2590 | error = EINVAL; | |
2591 | break; | |
2592 | } | |
2593 | pr->nr = 0; | |
2594 | if (ruleset->anchor == NULL) { | |
2595 | /* XXX kludge for pf_main_ruleset */ | |
2596 | RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) | |
2597 | if (anchor->parent == NULL) | |
2598 | pr->nr++; | |
2599 | } else { | |
2600 | RB_FOREACH(anchor, pf_anchor_node, | |
2601 | &ruleset->anchor->children) | |
2602 | pr->nr++; | |
2603 | } | |
2604 | break; | |
2605 | } | |
2606 | ||
2607 | case DIOCGETRULESET: { | |
2608 | struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; | |
2609 | struct pf_ruleset *ruleset; | |
2610 | struct pf_anchor *anchor; | |
2611 | u_int32_t nr = 0; | |
2612 | ||
2613 | pr->path[sizeof (pr->path) - 1] = 0; | |
2614 | if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { | |
2615 | error = EINVAL; | |
2616 | break; | |
2617 | } | |
2618 | pr->name[0] = 0; | |
2619 | if (ruleset->anchor == NULL) { | |
2620 | /* XXX kludge for pf_main_ruleset */ | |
2621 | RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) | |
2622 | if (anchor->parent == NULL && nr++ == pr->nr) { | |
2623 | strlcpy(pr->name, anchor->name, | |
2624 | sizeof (pr->name)); | |
2625 | break; | |
2626 | } | |
2627 | } else { | |
2628 | RB_FOREACH(anchor, pf_anchor_node, | |
2629 | &ruleset->anchor->children) | |
2630 | if (nr++ == pr->nr) { | |
2631 | strlcpy(pr->name, anchor->name, | |
2632 | sizeof (pr->name)); | |
2633 | break; | |
2634 | } | |
2635 | } | |
2636 | if (!pr->name[0]) | |
2637 | error = EBUSY; | |
2638 | break; | |
2639 | } | |
2640 | ||
2641 | case DIOCRCLRTABLES: { | |
2642 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
2643 | ||
2644 | if (io->pfrio_esize != 0) { | |
2645 | error = ENODEV; | |
2646 | break; | |
2647 | } | |
2648 | error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, | |
2649 | io->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2650 | break; | |
2651 | } | |
2652 | ||
2653 | case DIOCRADDTABLES: { | |
2654 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2655 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2656 | |
2657 | if (io->pfrio_esize != sizeof (struct pfr_table)) { | |
2658 | error = ENODEV; | |
2659 | break; | |
2660 | } | |
d1ecb069 | 2661 | error = pfr_add_tables(buf, io->pfrio_size, |
2662 | &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2663 | break; | |
2664 | } | |
2665 | ||
2666 | case DIOCRDELTABLES: { | |
2667 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2668 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2669 | |
2670 | if (io->pfrio_esize != sizeof (struct pfr_table)) { | |
2671 | error = ENODEV; | |
2672 | break; | |
2673 | } | |
d1ecb069 | 2674 | error = pfr_del_tables(buf, io->pfrio_size, |
2675 | &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2676 | break; | |
2677 | } | |
2678 | ||
2679 | case DIOCRGETTABLES: { | |
2680 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2681 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2682 | |
2683 | if (io->pfrio_esize != sizeof (struct pfr_table)) { | |
2684 | error = ENODEV; | |
2685 | break; | |
2686 | } | |
d1ecb069 | 2687 | error = pfr_get_tables(&io->pfrio_table, buf, |
2688 | &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2689 | break; | |
2690 | } | |
2691 | ||
2692 | case DIOCRGETTSTATS: { | |
2693 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2694 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2695 | |
2696 | if (io->pfrio_esize != sizeof (struct pfr_tstats)) { | |
2697 | error = ENODEV; | |
2698 | break; | |
2699 | } | |
d1ecb069 | 2700 | error = pfr_get_tstats(&io->pfrio_table, buf, |
2701 | &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2702 | break; | |
2703 | } | |
2704 | ||
2705 | case DIOCRCLRTSTATS: { | |
2706 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2707 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2708 | |
2709 | if (io->pfrio_esize != sizeof (struct pfr_table)) { | |
2710 | error = ENODEV; | |
2711 | break; | |
2712 | } | |
d1ecb069 | 2713 | error = pfr_clr_tstats(buf, io->pfrio_size, |
2714 | &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2715 | break; | |
2716 | } | |
2717 | ||
2718 | case DIOCRSETTFLAGS: { | |
2719 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2720 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2721 | |
2722 | if (io->pfrio_esize != sizeof (struct pfr_table)) { | |
2723 | error = ENODEV; | |
2724 | break; | |
2725 | } | |
d1ecb069 | 2726 | error = pfr_set_tflags(buf, io->pfrio_size, |
2727 | io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, |
2728 | &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2729 | break; | |
2730 | } | |
2731 | ||
2732 | case DIOCRCLRADDRS: { | |
2733 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
2734 | ||
2735 | if (io->pfrio_esize != 0) { | |
2736 | error = ENODEV; | |
2737 | break; | |
2738 | } | |
2739 | error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, | |
2740 | io->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2741 | break; | |
2742 | } | |
2743 | ||
2744 | case DIOCRADDADDRS: { | |
2745 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2746 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2747 | |
2748 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2749 | error = ENODEV; | |
2750 | break; | |
2751 | } | |
d1ecb069 | 2752 | error = pfr_add_addrs(&io->pfrio_table, buf, |
2753 | io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | |
2754 | PFR_FLAG_USERIOCTL); | |
2755 | break; | |
2756 | } | |
2757 | ||
2758 | case DIOCRDELADDRS: { | |
2759 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2760 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2761 | |
2762 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2763 | error = ENODEV; | |
2764 | break; | |
2765 | } | |
d1ecb069 | 2766 | error = pfr_del_addrs(&io->pfrio_table, buf, |
2767 | io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | |
2768 | PFR_FLAG_USERIOCTL); | |
2769 | break; | |
2770 | } | |
2771 | ||
2772 | case DIOCRSETADDRS: { | |
2773 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2774 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2775 | |
2776 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2777 | error = ENODEV; | |
2778 | break; | |
2779 | } | |
d1ecb069 | 2780 | error = pfr_set_addrs(&io->pfrio_table, buf, |
2781 | io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, |
2782 | &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | | |
2783 | PFR_FLAG_USERIOCTL, 0); | |
2784 | break; | |
2785 | } | |
2786 | ||
2787 | case DIOCRGETADDRS: { | |
2788 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2789 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2790 | |
2791 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2792 | error = ENODEV; | |
2793 | break; | |
2794 | } | |
d1ecb069 | 2795 | error = pfr_get_addrs(&io->pfrio_table, buf, |
2796 | &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2797 | break; | |
2798 | } | |
2799 | ||
2800 | case DIOCRGETASTATS: { | |
2801 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2802 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2803 | |
2804 | if (io->pfrio_esize != sizeof (struct pfr_astats)) { | |
2805 | error = ENODEV; | |
2806 | break; | |
2807 | } | |
d1ecb069 | 2808 | error = pfr_get_astats(&io->pfrio_table, buf, |
2809 | &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); |
2810 | break; | |
2811 | } | |
2812 | ||
2813 | case DIOCRCLRASTATS: { | |
2814 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2815 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2816 | |
2817 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2818 | error = ENODEV; | |
2819 | break; | |
2820 | } | |
d1ecb069 | 2821 | error = pfr_clr_astats(&io->pfrio_table, buf, |
2822 | io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | |
2823 | PFR_FLAG_USERIOCTL); | |
2824 | break; | |
2825 | } | |
2826 | ||
2827 | case DIOCRTSTADDRS: { | |
2828 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2829 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2830 | |
2831 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2832 | error = ENODEV; | |
2833 | break; | |
2834 | } | |
d1ecb069 | 2835 | error = pfr_tst_addrs(&io->pfrio_table, buf, |
2836 | io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | |
2837 | PFR_FLAG_USERIOCTL); | |
2838 | break; | |
2839 | } | |
2840 | ||
2841 | case DIOCRINADEFINE: { | |
2842 | struct pfioc_table *io = (struct pfioc_table *)addr; | |
d1ecb069 | 2843 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer); |
2844 | |
2845 | if (io->pfrio_esize != sizeof (struct pfr_addr)) { | |
2846 | error = ENODEV; | |
2847 | break; | |
2848 | } | |
d1ecb069 | 2849 | error = pfr_ina_define(&io->pfrio_table, buf, |
2850 | io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, |
2851 | io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2852 | break; | |
2853 | } | |
2854 | ||
2855 | case DIOCOSFPADD: { | |
2856 | struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; | |
2857 | error = pf_osfp_add(io); | |
2858 | break; | |
2859 | } | |
2860 | ||
2861 | case DIOCOSFPGET: { | |
2862 | struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; | |
2863 | error = pf_osfp_get(io); | |
2864 | break; | |
2865 | } | |
2866 | ||
2867 | case DIOCXBEGIN: { | |
2868 | struct pfioc_trans *io = (struct pfioc_trans *)addr; | |
2869 | struct pfioc_trans_e *ioe; | |
2870 | struct pfr_table *table; | |
d1ecb069 | 2871 | user_addr_t buf; |
2872 | int i; |
2873 | ||
2874 | if (io->esize != sizeof (*ioe)) { | |
2875 | error = ENODEV; | |
2876 | goto fail; | |
2877 | } | |
2878 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
2879 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
2880 | buf = PF_USER_ADDR(addr, pfioc_trans, array); |
2881 | for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) { | |
2882 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
2883 | _FREE(table, M_TEMP); |
2884 | _FREE(ioe, M_TEMP); | |
2885 | error = EFAULT; | |
2886 | goto fail; | |
2887 | } | |
2888 | switch (ioe->rs_num) { | |
2889 | case PF_RULESET_ALTQ: | |
2890 | #if ALTQ | |
2891 | if (ioe->anchor[0]) { | |
2892 | _FREE(table, M_TEMP); | |
2893 | _FREE(ioe, M_TEMP); | |
2894 | error = EINVAL; | |
2895 | goto fail; | |
2896 | } | |
2897 | if ((error = pf_begin_altq(&ioe->ticket))) { | |
2898 | _FREE(table, M_TEMP); | |
2899 | _FREE(ioe, M_TEMP); | |
2900 | goto fail; | |
2901 | } | |
2902 | #endif /* ALTQ */ | |
2903 | break; | |
2904 | case PF_RULESET_TABLE: | |
2905 | bzero(table, sizeof (*table)); | |
2906 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
2907 | sizeof (table->pfrt_anchor)); | |
2908 | if ((error = pfr_ina_begin(table, | |
2909 | &ioe->ticket, NULL, 0))) { | |
2910 | _FREE(table, M_TEMP); | |
2911 | _FREE(ioe, M_TEMP); | |
2912 | goto fail; | |
2913 | } | |
2914 | break; | |
2915 | default: | |
2916 | if ((error = pf_begin_rules(&ioe->ticket, | |
2917 | ioe->rs_num, ioe->anchor))) { | |
2918 | _FREE(table, M_TEMP); | |
2919 | _FREE(ioe, M_TEMP); | |
2920 | goto fail; | |
2921 | } | |
2922 | break; | |
2923 | } | |
d1ecb069 | 2924 | if (copyout(ioe, buf, sizeof (*ioe))) { |
2925 | _FREE(table, M_TEMP); |
2926 | _FREE(ioe, M_TEMP); | |
2927 | error = EFAULT; | |
2928 | goto fail; | |
2929 | } | |
2930 | } | |
2931 | _FREE(table, M_TEMP); | |
2932 | _FREE(ioe, M_TEMP); | |
2933 | break; | |
2934 | } | |
2935 | ||
2936 | case DIOCXROLLBACK: { | |
2937 | struct pfioc_trans *io = (struct pfioc_trans *)addr; | |
2938 | struct pfioc_trans_e *ioe; | |
2939 | struct pfr_table *table; | |
d1ecb069 | 2940 | user_addr_t buf; |
2941 | int i; |
2942 | ||
2943 | if (io->esize != sizeof (*ioe)) { | |
2944 | error = ENODEV; | |
2945 | goto fail; | |
2946 | } | |
2947 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
2948 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
2949 | buf = PF_USER_ADDR(addr, pfioc_trans, array); |
2950 | for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) { | |
2951 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
2952 | _FREE(table, M_TEMP); |
2953 | _FREE(ioe, M_TEMP); | |
2954 | error = EFAULT; | |
2955 | goto fail; | |
2956 | } | |
2957 | switch (ioe->rs_num) { | |
2958 | case PF_RULESET_ALTQ: | |
2959 | #if ALTQ | |
2960 | if (ioe->anchor[0]) { | |
2961 | _FREE(table, M_TEMP); | |
2962 | _FREE(ioe, M_TEMP); | |
2963 | error = EINVAL; | |
2964 | goto fail; | |
2965 | } | |
2966 | if ((error = pf_rollback_altq(ioe->ticket))) { | |
2967 | _FREE(table, M_TEMP); | |
2968 | _FREE(ioe, M_TEMP); | |
2969 | goto fail; /* really bad */ | |
2970 | } | |
2971 | #endif /* ALTQ */ | |
2972 | break; | |
2973 | case PF_RULESET_TABLE: | |
2974 | bzero(table, sizeof (*table)); | |
2975 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
2976 | sizeof (table->pfrt_anchor)); | |
2977 | if ((error = pfr_ina_rollback(table, | |
2978 | ioe->ticket, NULL, 0))) { | |
2979 | _FREE(table, M_TEMP); | |
2980 | _FREE(ioe, M_TEMP); | |
2981 | goto fail; /* really bad */ | |
2982 | } | |
2983 | break; | |
2984 | default: | |
2985 | if ((error = pf_rollback_rules(ioe->ticket, | |
2986 | ioe->rs_num, ioe->anchor))) { | |
2987 | _FREE(table, M_TEMP); | |
2988 | _FREE(ioe, M_TEMP); | |
2989 | goto fail; /* really bad */ | |
2990 | } | |
2991 | break; | |
2992 | } | |
2993 | } | |
2994 | _FREE(table, M_TEMP); | |
2995 | _FREE(ioe, M_TEMP); | |
2996 | break; | |
2997 | } | |
2998 | ||
2999 | case DIOCXCOMMIT: { | |
3000 | struct pfioc_trans *io = (struct pfioc_trans *)addr; | |
3001 | struct pfioc_trans_e *ioe; | |
3002 | struct pfr_table *table; | |
3003 | struct pf_ruleset *rs; | |
d1ecb069 | 3004 | user_addr_t _buf, buf; |
3005 | int i; |
3006 | ||
3007 | if (io->esize != sizeof (*ioe)) { | |
3008 | error = ENODEV; | |
3009 | goto fail; | |
3010 | } | |
3011 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
3012 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
d1ecb069 | 3013 | buf = _buf = PF_USER_ADDR(addr, pfioc_trans, array); |
b0d623f7 | 3014 | /* first make sure everything will succeed */ |
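/*
 * This first pass only validates every transaction element (tickets,
 * anchors, ruleset numbers); nothing is swapped in yet.  The second
 * pass below performs the actual commits, so a bad element cannot
 * leave the rulesets half switched.
 */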
3015 | for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) { |
3016 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
3017 | _FREE(table, M_TEMP); |
3018 | _FREE(ioe, M_TEMP); | |
3019 | error = EFAULT; | |
3020 | goto fail; | |
3021 | } | |
3022 | switch (ioe->rs_num) { | |
3023 | case PF_RULESET_ALTQ: | |
3024 | #if ALTQ | |
3025 | if (ioe->anchor[0]) { | |
3026 | _FREE(table, M_TEMP); | |
3027 | _FREE(ioe, M_TEMP); | |
3028 | error = EINVAL; | |
3029 | goto fail; | |
3030 | } | |
3031 | if (!altqs_inactive_open || ioe->ticket != | |
3032 | ticket_altqs_inactive) { | |
3033 | _FREE(table, M_TEMP); | |
3034 | _FREE(ioe, M_TEMP); | |
3035 | error = EBUSY; | |
3036 | goto fail; | |
3037 | } | |
3038 | #endif /* ALTQ */ | |
3039 | break; | |
3040 | case PF_RULESET_TABLE: | |
3041 | rs = pf_find_ruleset(ioe->anchor); | |
3042 | if (rs == NULL || !rs->topen || ioe->ticket != | |
3043 | rs->tticket) { | |
3044 | _FREE(table, M_TEMP); | |
3045 | _FREE(ioe, M_TEMP); | |
3046 | error = EBUSY; | |
3047 | goto fail; | |
3048 | } | |
3049 | break; | |
3050 | default: | |
3051 | if (ioe->rs_num < 0 || ioe->rs_num >= | |
3052 | PF_RULESET_MAX) { | |
3053 | _FREE(table, M_TEMP); | |
3054 | _FREE(ioe, M_TEMP); | |
3055 | error = EINVAL; | |
3056 | goto fail; | |
3057 | } | |
3058 | rs = pf_find_ruleset(ioe->anchor); | |
3059 | if (rs == NULL || | |
3060 | !rs->rules[ioe->rs_num].inactive.open || | |
3061 | rs->rules[ioe->rs_num].inactive.ticket != | |
3062 | ioe->ticket) { | |
3063 | _FREE(table, M_TEMP); | |
3064 | _FREE(ioe, M_TEMP); | |
3065 | error = EBUSY; | |
3066 | goto fail; | |
3067 | } | |
3068 | break; | |
3069 | } | |
3070 | } | |
d1ecb069 | 3071 | buf = _buf; |
b0d623f7 | 3072 | /* now do the commit - no errors should happen here */ |
3073 | for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) { |
3074 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
3075 | _FREE(table, M_TEMP); |
3076 | _FREE(ioe, M_TEMP); | |
3077 | error = EFAULT; | |
3078 | goto fail; | |
3079 | } | |
3080 | switch (ioe->rs_num) { | |
3081 | case PF_RULESET_ALTQ: | |
3082 | #if ALTQ | |
3083 | if ((error = pf_commit_altq(ioe->ticket))) { | |
3084 | _FREE(table, M_TEMP); | |
3085 | _FREE(ioe, M_TEMP); | |
3086 | goto fail; /* really bad */ | |
3087 | } | |
3088 | #endif /* ALTQ */ | |
3089 | break; | |
3090 | case PF_RULESET_TABLE: | |
3091 | bzero(table, sizeof (*table)); | |
3092 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
3093 | sizeof (table->pfrt_anchor)); | |
3094 | if ((error = pfr_ina_commit(table, ioe->ticket, | |
3095 | NULL, NULL, 0))) { | |
3096 | _FREE(table, M_TEMP); | |
3097 | _FREE(ioe, M_TEMP); | |
3098 | goto fail; /* really bad */ | |
3099 | } | |
3100 | break; | |
3101 | default: | |
3102 | if ((error = pf_commit_rules(ioe->ticket, | |
3103 | ioe->rs_num, ioe->anchor))) { | |
3104 | _FREE(table, M_TEMP); | |
3105 | _FREE(ioe, M_TEMP); | |
3106 | goto fail; /* really bad */ | |
3107 | } | |
3108 | break; | |
3109 | } | |
3110 | } | |
3111 | _FREE(table, M_TEMP); | |
3112 | _FREE(ioe, M_TEMP); | |
3113 | break; | |
3114 | } | |
3115 | ||
3116 | case DIOCGETSRCNODES: { | |
3117 | struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; | |
3118 | struct pf_src_node *n, *pstore; |
3119 | user_addr_t buf; | |
3120 | u_int32_t nr = 0; |
3121 | int space = psn->psn_len; | |
3122 | ||
3123 | if (space == 0) { | |
3124 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) | |
3125 | nr++; | |
3126 | psn->psn_len = sizeof (struct pf_src_node) * nr; | |
3127 | break; | |
3128 | } | |
3129 | ||
3130 | pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK); | |
d1ecb069 | 3131 | buf = PF_USER_ADDR(addr, pfioc_src_nodes, psn_buf); |
b0d623f7 | 3132 | |
3133 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { |
3134 | uint64_t secs = pf_time_second(), diff; | |
3135 | ||
3136 | if ((nr + 1) * sizeof (*pstore) > |
3137 | (unsigned)psn->psn_len) | |
3138 | break; |
3139 | ||
3140 | bcopy(n, pstore, sizeof (*pstore)); | |
3141 | if (n->rule.ptr != NULL) | |
3142 | pstore->rule.nr = n->rule.ptr->nr; | |
3143 | pstore->creation = secs - pstore->creation; | |
3144 | if (pstore->expire > secs) | |
3145 | pstore->expire -= secs; | |
3146 | else | |
3147 | pstore->expire = 0; | |
3148 | ||
3149 | /* adjust the connection rate estimate */ | |
3150 | diff = secs - n->conn_rate.last; | |
3151 | if (diff >= n->conn_rate.seconds) | |
3152 | pstore->conn_rate.count = 0; | |
3153 | else | |
3154 | pstore->conn_rate.count -= | |
3155 | n->conn_rate.count * diff / | |
3156 | n->conn_rate.seconds; | |
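/*
 * Worked example (editorial): with count = 30 connections over a
 * seconds = 10 window and diff = 4 seconds since the last update,
 * 30 * 4 / 10 = 12 is subtracted and 18 is reported, i.e. the count
 * decays linearly toward zero across the tracking window.
 */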
3157 | ||
d1ecb069 | 3158 | error = copyout(pstore, buf, sizeof (*pstore)); |
3159 | if (error) { |
3160 | _FREE(pstore, M_TEMP); | |
3161 | goto fail; | |
3162 | } | |
d1ecb069 | 3163 | buf += sizeof (*pstore); |
3164 | nr++; |
3165 | } | |
3166 | psn->psn_len = sizeof (struct pf_src_node) * nr; | |
3167 | ||
3168 | _FREE(pstore, M_TEMP); | |
3169 | break; | |
3170 | } | |
3171 | ||
3172 | case DIOCCLRSRCNODES: { | |
3173 | struct pf_src_node *n; | |
3174 | struct pf_state *state; | |
3175 | ||
3176 | RB_FOREACH(state, pf_state_tree_id, &tree_id) { | |
3177 | state->src_node = NULL; | |
3178 | state->nat_src_node = NULL; | |
3179 | } | |
3180 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { | |
3181 | n->expire = 1; | |
3182 | n->states = 0; | |
3183 | } | |
3184 | pf_purge_expired_src_nodes(); | |
3185 | pf_status.src_nodes = 0; | |
3186 | break; | |
3187 | } | |
3188 | ||
3189 | case DIOCKILLSRCNODES: { | |
3190 | struct pf_src_node *sn; | |
3191 | struct pf_state *s; | |
3192 | struct pfioc_src_node_kill *psnk = | |
3193 | (struct pfioc_src_node_kill *)addr; | |
3194 | int killed = 0; | |
3195 | ||
3196 | RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { | |
3197 | if (PF_MATCHA(psnk->psnk_src.neg, | |
3198 | &psnk->psnk_src.addr.v.a.addr, | |
3199 | &psnk->psnk_src.addr.v.a.mask, | |
3200 | &sn->addr, sn->af) && | |
3201 | PF_MATCHA(psnk->psnk_dst.neg, | |
3202 | &psnk->psnk_dst.addr.v.a.addr, | |
3203 | &psnk->psnk_dst.addr.v.a.mask, | |
3204 | &sn->raddr, sn->af)) { | |
3205 | /* Handle state to src_node linkage */ | |
3206 | if (sn->states != 0) { | |
3207 | RB_FOREACH(s, pf_state_tree_id, | |
3208 | &tree_id) { | |
3209 | if (s->src_node == sn) | |
3210 | s->src_node = NULL; | |
3211 | if (s->nat_src_node == sn) | |
3212 | s->nat_src_node = NULL; | |
3213 | } | |
3214 | sn->states = 0; | |
3215 | } | |
3216 | sn->expire = 1; | |
3217 | killed++; | |
3218 | } | |
3219 | } | |
3220 | ||
3221 | if (killed > 0) | |
3222 | pf_purge_expired_src_nodes(); | |
3223 | ||
3224 | psnk->psnk_af = killed; | |
3225 | break; | |
3226 | } | |
3227 | ||
3228 | case DIOCSETHOSTID: { | |
3229 | u_int32_t *hid = (u_int32_t *)addr; | |
3230 | ||
3231 | if (*hid == 0) | |
3232 | pf_status.hostid = random(); | |
3233 | else | |
3234 | pf_status.hostid = *hid; | |
3235 | break; | |
3236 | } | |
3237 | ||
3238 | case DIOCOSFPFLUSH: | |
3239 | pf_osfp_flush(); | |
3240 | break; | |
3241 | ||
3242 | case DIOCIGETIFACES: { | |
3243 | struct pfioc_iface *io = (struct pfioc_iface *)addr; | |
d1ecb069 | 3244 | user_addr_t buf = PF_USER_ADDR(addr, pfioc_iface, pfiio_buffer); |
b0d623f7 | 3245 | |
3246 | /* esize must be that of the user space version of pfi_kif */ |
3247 | if (io->pfiio_esize != sizeof (struct pfi_uif)) { | |
3248 | error = ENODEV; |
3249 | break; | |
3250 | } | |
3251 | io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0'; |
3252 | error = pfi_get_ifaces(io->pfiio_name, buf, &io->pfiio_size); | |
3253 | break; |
3254 | } | |
3255 | ||
3256 | case DIOCSETIFFLAG: { | |
3257 | struct pfioc_iface *io = (struct pfioc_iface *)addr; | |
3258 | ||
d1ecb069 | 3259 | io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0'; |
3260 | error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); |
3261 | break; | |
3262 | } | |
3263 | ||
3264 | case DIOCCLRIFFLAG: { | |
3265 | struct pfioc_iface *io = (struct pfioc_iface *)addr; | |
3266 | ||
d1ecb069 | 3267 | io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0'; |
3268 | error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); |
3269 | break; | |
3270 | } | |
3271 | ||
3272 | default: | |
3273 | error = ENODEV; | |
3274 | break; | |
3275 | } | |
3276 | fail: | |
3277 | lck_mtx_unlock(pf_lock); | |
3278 | lck_rw_done(pf_perim_lock); | |
3279 | ||
3280 | return (error); | |
3281 | } | |
3282 | ||
3283 | int | |
3284 | pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp, | |
3285 | unsigned int af, int input) | |
3286 | { | |
3287 | int error = 0, reentry; | |
3288 | struct thread *curthread = current_thread(); | |
3289 | struct mbuf *nextpkt; | |
3290 | ||
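/*
 * If this thread already holds the pf locks for this interface (for
 * example when pf itself emits a packet that re-enters the hook),
 * skip the lock acquisition below to avoid recursing on pf_lock.
 */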
3291 | reentry = (ifp->if_pf_curthread == curthread); | |
3292 | if (!reentry) { | |
3293 | lck_rw_lock_shared(pf_perim_lock); | |
d1ecb069 | 3294 | if (!pf_is_enabled) |
3295 | goto done; |
3296 | ||
3297 | lck_mtx_lock(pf_lock); | |
3298 | ifp->if_pf_curthread = curthread; | |
3299 | } | |
3300 | ||
3301 | if (mppn != NULL && *mppn != NULL) | |
3302 | VERIFY(*mppn == *mp); | |
3303 | if ((nextpkt = (*mp)->m_nextpkt) != NULL) | |
3304 | (*mp)->m_nextpkt = NULL; | |
3305 | ||
3306 | switch (af) { | |
3307 | #if INET | |
3308 | case AF_INET: { | |
3309 | error = pf_inet_hook(ifp, mp, input); | |
3310 | break; | |
3311 | } | |
3312 | #endif /* INET */ | |
3313 | #if INET6 | |
3314 | case AF_INET6: | |
3315 | error = pf_inet6_hook(ifp, mp, input); | |
3316 | break; | |
3317 | #endif /* INET6 */ | |
3318 | default: | |
3319 | break; | |
3320 | } | |
3321 | ||
3322 | if (nextpkt != NULL) { | |
3323 | if (*mp != NULL) { | |
3324 | struct mbuf *m = *mp; | |
3325 | while (m->m_nextpkt != NULL) | |
3326 | m = m->m_nextpkt; | |
3327 | m->m_nextpkt = nextpkt; | |
3328 | } else { | |
3329 | *mp = nextpkt; | |
3330 | } | |
3331 | } | |
3332 | if (mppn != NULL && *mppn != NULL) | |
3333 | *mppn = *mp; | |
3334 | ||
3335 | if (!reentry) { | |
3336 | ifp->if_pf_curthread = NULL; | |
3337 | lck_mtx_unlock(pf_lock); | |
3338 | } | |
3339 | done: | |
3340 | if (!reentry) | |
3341 | lck_rw_done(pf_perim_lock); | |
3342 | ||
3343 | return (error); | |
3344 | } | |
3345 | ||
3346 | ||
3347 | #if INET | |
3348 | static int | |
3349 | pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input) | |
3350 | { | |
3351 | struct mbuf *m = *mp; | |
3352 | #if BYTE_ORDER != BIG_ENDIAN | |
3353 | struct ip *ip = mtod(m, struct ip *); | |
3354 | #endif | |
3355 | int error = 0; | |
3356 | ||
3357 | /* | |
3358 | * If the packet is outbound, is originated locally, is flagged for | |
3359 | * delayed UDP/TCP checksum calculation, and is about to be processed | |
3360 | * for an interface that doesn't support the appropriate checksum | |
3361 | * offloading, then calculate the checksum here so that PF can adjust |
3362 | * it properly. | |
3363 | */ | |
3364 | if (!input && m->m_pkthdr.rcvif == NULL) { | |
3365 | static const int mask = CSUM_DELAY_DATA; | |
3366 | const int flags = m->m_pkthdr.csum_flags & | |
3367 | ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist); | |
3368 | ||
3369 | if (flags & mask) { | |
3370 | in_delayed_cksum(m); | |
3371 | m->m_pkthdr.csum_flags &= ~mask; | |
3372 | } | |
3373 | } | |
3374 | ||
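/*
 * ip_len and ip_off are in host byte order at this layer, while
 * pf_test() expects network byte order; convert before the call and
 * restore host byte order afterwards if the packet survives.  On
 * big-endian systems the two representations coincide and the swaps
 * are compiled out.
 */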
3375 | #if BYTE_ORDER != BIG_ENDIAN | |
3376 | HTONS(ip->ip_len); | |
3377 | HTONS(ip->ip_off); | |
3378 | #endif | |
3379 | if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) { | |
3380 | if (*mp != NULL) { | |
3381 | m_freem(*mp); | |
3382 | *mp = NULL; | |
3383 | error = EHOSTUNREACH; | |
3384 | } else { | |
3385 | error = ENOBUFS; | |
3386 | } | |
3387 | } | |
3388 | #if BYTE_ORDER != BIG_ENDIAN | |
3389 | else { | |
3390 | ip = mtod(*mp, struct ip *); | |
3391 | NTOHS(ip->ip_len); | |
3392 | NTOHS(ip->ip_off); | |
3393 | } | |
3394 | #endif | |
3395 | return (error); | |
3396 | } | |
3397 | #endif /* INET */ | |
3398 | ||
3399 | #if INET6 | |
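/*
 * IPv6 counterpart of pf_inet_hook().  The delayed-checksum handling
 * is compiled out until IPv6 hardware checksum offload is supported,
 * and no byte-order fixup is done here; unlike ip_len/ip_off above,
 * the IPv6 header fields are assumed to be kept in network byte order.
 */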
3400 | int | |
3401 | pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input) | |
3402 | { | |
3403 | int error = 0; | |
3404 | ||
3405 | #if 0 | |
3406 | /* | |
3407 | * TODO: enable this block once IPv6 hardware checksum offload is supported | |
3408 | */ | |
3409 | /* | |
3410 | * If the packet is outbound, was originated locally, is flagged for | |
3411 | * delayed UDP/TCP checksum calculation, and is about to be sent over | |
3412 | * an interface that doesn't support the appropriate checksum | |
3413 | * offloading, then calculate the checksum here so that PF can adjust | |
3414 | * it properly. | |
3415 | */ | |
3416 | if (!input && (*mp)->m_pkthdr.rcvif == NULL) { | |
3417 | static const int mask = CSUM_DELAY_DATA; | |
3418 | const int flags = (*mp)->m_pkthdr.csum_flags & | |
3419 | ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist); | |
3420 | ||
3421 | if (flags & mask) { | |
3422 | in6_delayed_cksum(*mp); | |
3423 | (*mp)->m_pkthdr.csum_flags &= ~mask; | |
3424 | } | |
3425 | } | |
3426 | #endif | |
3427 | ||
3428 | if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) { | |
3429 | if (*mp != NULL) { | |
3430 | m_freem(*mp); | |
3431 | *mp = NULL; | |
3432 | error = EHOSTUNREACH; | |
3433 | } else { | |
3434 | error = ENOBUFS; | |
3435 | } | |
3436 | } | |
3437 | return (error); | |
3438 | } | |
3439 | #endif /* INET6 */ | |
3440 | ||
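/*
 * Called from the interface-address ioctl paths whenever an address
 * is set, added or deleted, so that pf's per-interface state is
 * refreshed via pfi_kifaddr_update().  Any other ioctl reaching this
 * hook is a caller bug, hence the panic below.
 */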
3441 | int | |
3442 | pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd) | |
3443 | { | |
3444 | lck_rw_lock_shared(pf_perim_lock); | |
3445 | lck_mtx_lock(pf_lock); |
3446 | ||
3447 | switch (cmd) { | |
3448 | case SIOCSIFADDR: | |
3449 | case SIOCAIFADDR: | |
3450 | case SIOCDIFADDR: | |
3451 | #if INET6 | |
3452 | case SIOCAIFADDR_IN6: | |
3453 | case SIOCDIFADDR_IN6: | |
3454 | #endif /* INET6 */ | |
3455 | if (ifp->if_pf_kif != NULL) | |
3456 | pfi_kifaddr_update(ifp->if_pf_kif); | |
3457 | break; | |
3458 | default: | |
3459 | panic("%s: unexpected ioctl %lu", __func__, cmd); | |
3460 | /* NOTREACHED */ | |
3461 | } | |
3462 | ||
3463 | lck_mtx_unlock(pf_lock); | |
3464 | lck_rw_done(pf_perim_lock); |
3465 | return (0); | |
3466 | } | |
3467 | ||
3468 | /* | |
3469 | * Caller acquires dlil lock as writer (exclusive) | |
3470 | */ | |
3471 | void | |
3472 | pf_ifnet_hook(struct ifnet *ifp, int attach) | |
3473 | { | |
3474 | lck_rw_lock_shared(pf_perim_lock); | |
3475 | lck_mtx_lock(pf_lock); |
3476 | if (attach) | |
3477 | pfi_attach_ifnet(ifp); | |
3478 | else | |
3479 | pfi_detach_ifnet(ifp); | |
3480 | lck_mtx_unlock(pf_lock); | |
3481 | lck_rw_done(pf_perim_lock); |
3482 | } | |
3483 | ||
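/*
 * Register every interface that is already attached with pf;
 * interfaces that come and go later are handled through
 * pf_ifnet_hook() above.
 */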
3484 | static void | |
3485 | pf_attach_hooks(void) | |
3486 | { | |
b0d623f7 | 3487 | ifnet_head_lock_shared(); |
3488 | /* |
3489 | * Check against ifnet_addrs[] before proceeding, in case this | |
3490 | * is called very early on, e.g. during dlil_init() before any | |
3491 | * network interface is attached. | |
3492 | */ | |
3493 | if (ifnet_addrs != NULL) { | |
3494 | int i; | |
3495 | ||
3496 | for (i = 0; i <= if_index; i++) { | |
3497 | struct ifnet *ifp = ifindex2ifnet[i]; | |
3498 | if (ifp != NULL) { | |
3499 | pfi_attach_ifnet(ifp); | |
3500 | } | |
3501 | } |
3502 | } | |
3503 | ifnet_head_done(); | |
3504 | } |
3505 | ||
3506 | #if 0 |
3507 | /* currently unused along with pfdetach() */ | |
3508 | static void |
3509 | pf_detach_hooks(void) | |
3510 | { | |
b0d623f7 | 3511 | ifnet_head_lock_shared(); |
d1ecb069 A |
3512 | if (ifnet_addrs != NULL) { |
3513 | for (i = 0; i <= if_index; i++) { | |
3514 | int i; | |
3515 | ||
3516 | struct ifnet *ifp = ifindex2ifnet[i]; | |
3517 | if (ifp != NULL && ifp->if_pf_kif != NULL) { | |
3518 | pfi_detach_ifnet(ifp); | |
3519 | } | |
3520 | } |
3521 | } | |
3522 | ifnet_head_done(); | |
b0d623f7 | 3523 | } |
d1ecb069 | 3524 | #endif |