/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/mcache.h>
#include <sys/queue.h>

#include <mach/vm_param.h>

#include <net/dlil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#else
struct ip_fw_args;
#endif /* DUMMYNET */

#include <libkern/crypto/md5.h>

#include <machine/machine_routines.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>

#if NPFSYNC
#include <net/if_pfsync.h>
#endif /* NPFSYNC */

#if PFLOG
#include <net/if_pflog.h>
#endif /* PFLOG */

#if INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

#if PF_ALTQ
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/classq/classq_red.h>
#include <net/classq/classq_rio.h>
#include <net/classq/classq_blue.h>
#include <net/classq/classq_sfb.h>
#endif /* PF_ALTQ */

#include <dev/random/randomdev.h>

#if 0
static void pfdetach(void);
#endif
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
	struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
	struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
	struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
	struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
	struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
	struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
	struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
	struct pfioc_iface_64 *, struct proc *);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
	u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
#if PF_ALTQ
static int pf_begin_altq(u_int32_t *);
static int pf_rollback_altq(u_int32_t);
static int pf_commit_altq(u_int32_t);
static int pf_enable_altq(struct pf_altq *);
static int pf_disable_altq(struct pf_altq *);
static void pf_altq_copyin(struct pf_altq *, struct pf_altq *);
static void pf_altq_copyout(struct pf_altq *, struct pf_altq *);
#endif /* PF_ALTQ */
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
	int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
	struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
	struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
	int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
	struct pf_ruleset *);
static void pf_delete_rule_by_owner(char *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
	int, struct pf_rule **);

#define PF_CDEV_MAJOR	(-1)

static struct cdevsw pf_cdevsw = {
	/* open */	pfopen,
	/* close */	pfclose,
	/* read */	eno_rdwrt,
	/* write */	eno_rdwrt,
	/* ioctl */	pfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	eno_select,
	/* mmap */	eno_mmap,
	/* strategy */	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};

static void pf_attach_hooks(void);
#if 0
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
#endif

/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

#if PF_ALTQ
u_int32_t altq_allowed = 0;
#endif /* PF_ALTQ */

u_int32_t pf_hash_seed;
int16_t pf_nat64_configured = 0;

/*
 * Reference-counting variables used to track PF enable requests
 */
static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;
static u_int64_t pffwrules;
static u_int32_t pfdevcnt;

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule pf_default_rule;
#if PF_ALTQ
static int pf_altq_running;
#endif /* PF_ALTQ */

#define TAGID_MAX 50000
#if !PF_ALTQ
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
	TAILQ_HEAD_INITIALIZER(pf_tags);
#else /* PF_ALTQ */
static TAILQ_HEAD(pf_tags, pf_tagname)
	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
	pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
#endif /* PF_ALTQ */

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t tagname2tag(struct pf_tags *, char *);
static void tag2tagname(struct pf_tags *, u_int16_t, char *);
static void tag_unref(struct pf_tags *, u_int16_t);
static int pf_rtlabel_add(struct pf_addr_wrap *);
static void pf_rtlabel_remove(struct pf_addr_wrap *);
static void pf_rtlabel_copyout(struct pf_addr_wrap *);

#if INET
static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
	struct ip_fw_args *);
#endif /* INET */
#if INET6
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
	struct ip_fw_args *);
#endif /* INET6 */

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
#define PFIOCX_STRUCT_DECL(s) \
struct { \
	union { \
		struct s##_32 _s##_32; \
		struct s##_64 _s##_64; \
	} _u; \
} *s##_un = NULL \

#define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
	VERIFY(s##_un == NULL); \
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
	if (s##_un == NULL) { \
		_action \
	} else { \
		if (p64) \
			bcopy(a, &s##_un->_u._s##_64, \
			    sizeof (struct s##_64)); \
		else \
			bcopy(a, &s##_un->_u._s##_32, \
			    sizeof (struct s##_32)); \
	} \
}

#define PFIOCX_STRUCT_END(s, a) { \
	VERIFY(s##_un != NULL); \
	if (p64) \
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
	else \
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
	_FREE(s##_un, M_TEMP); \
	s##_un = NULL; \
}

#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
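
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * a handler for a size-varying ioctl structure is expected to follow this
 * pattern, where "pfioc_table" stands for any structure with _32/_64
 * variants and p64 is the caller's 64-bit flag set up in pfioctl():
 *
 *	PFIOCX_STRUCT_DECL(pfioc_table);
 *	...
 *	PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
 *	error = pfioctl_ioc_table(cmd,
 *	    PFIOCX_STRUCT_ADDR32(pfioc_table),
 *	    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
 *	PFIOCX_STRUCT_END(pfioc_table, addr);
 *
 * BEGIN copies the user-provided bytes into a zeroed kernel-side union,
 * ADDR32/ADDR64 select the variant matching the calling process, and END
 * copies the (possibly updated) structure back out and frees the union.
 */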

/*
 * Helper macros for regular ioctl structures.
 */
#define PFIOC_STRUCT_BEGIN(a, v, _action) { \
	VERIFY((v) == NULL); \
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
	if ((v) == NULL) { \
		_action \
	} else { \
		bcopy(a, v, sizeof (*(v))); \
	} \
}

#define PFIOC_STRUCT_END(v, a) { \
	VERIFY((v) != NULL); \
	bcopy(v, a, sizeof (*(v))); \
	_FREE(v, M_TEMP); \
	(v) = NULL; \
}

#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
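
/*
 * Illustrative sketch (an assumption, not original code): fixed-size ioctl
 * structures use the simpler pair above, e.g. for a struct pfioc_rule:
 *
 *	struct pfioc_rule *pr = NULL;
 *	PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
 *	error = pfioctl_ioc_rule(cmd, minordev, pr, p);
 *	PFIOC_STRUCT_END(pr, addr);
 *
 * The handler works on a private, zero-initialized kernel copy, and the
 * result is copied back to the caller-supplied buffer on completion.
 */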

static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;

extern void pfi_kifaddr_update(void *);

/* pf enable ref-counting helper functions */
static u_int64_t generate_token(struct proc *);
static int remove_token(struct pfioc_remove_token *);
static void invalidate_all_tokens(void);

static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK|M_ZERO);

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		printf("%s: unable to allocate pf token structure!\n", __func__);
		return (0);
	}

	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof (new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return (token_value);
}

static int
remove_token(struct pfioc_remove_token *tok)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		if (tok->token_value == entry->token.token_value) {
			SLIST_REMOVE(&token_list_head, entry,
			    pfioc_kernel_token, next);
			_FREE(entry, M_TEMP);
			nr_tokens--;
			return (0);	/* success */
		}
	}

	printf("pf: remove failure\n");
	return (ESRCH);	/* failure */
}

static void
invalidate_all_tokens(void)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
		_FREE(entry, M_TEMP);
	}

	nr_tokens = 0;
}
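
/*
 * Note (an assumption based on the helpers above; the ioctl handlers
 * themselves fall outside this excerpt): DIOCSTARTREF/DIOCSTOPREF are
 * expected to call generate_token() and remove_token() respectively while
 * adjusting pf_enabled_ref_count, so that PF stays enabled until every
 * outstanding token has been returned; invalidate_all_tokens() drops them
 * all when PF is forcibly stopped.
 */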

void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#if PF_ALTQ
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
#endif /* PF_ALTQ */
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);
#if PF_ALTQ
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	PE_parse_boot_argn("altq", &altq_allowed, sizeof (altq_allowed));

	_CASSERT(ALTRQ_PURGE == CLASSQRQ_PURGE);
	_CASSERT(ALTRQ_PURGE_SC == CLASSQRQ_PURGE_SC);
	_CASSERT(ALTRQ_EVENT == CLASSQRQ_EVENT);

	_CASSERT(ALTDQ_REMOVE == CLASSQDQ_REMOVE);
	_CASSERT(ALTDQ_POLL == CLASSQDQ_POLL);
#endif /* PF_ALTQ */

	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!\n", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
}

#if 0
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#if PF_ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* PF_ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
#if PF_ALTQ
	pool_destroy(&pf_altq_pl);
#endif /* PF_ALTQ */
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif

static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX)
		return (ENXIO);

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		if (pfdevcnt != 0) {
			lck_mtx_unlock(pf_lock);
			return (EBUSY);
		}
		pfdevcnt++;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}

static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= PFDEV_MAX)
		return (ENXIO);

	if (minor(dev) == PFDEV_PFM) {
		lck_mtx_lock(pf_lock);
		VERIFY(pfdevcnt > 0);
		pfdevcnt--;
		lck_mtx_unlock(pf_lock);
	}
	return (0);
}

static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
	u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
	u_int8_t check_ticket)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule;
	int rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#if PF_ALTQ
	if (altq_allowed) {
		if (rule->pqid != rule->qid)
			pf_qid_unref(rule->pqid);
		pf_qid_unref(rule->qid);
	}
#endif /* PF_ALTQ */
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */
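
	/*
	 * Illustrative example (not in the original): with tags 1, 2 and 4
	 * already allocated, the scan below walks past 1 and 2, stops at the
	 * entry holding 4, hands out new_tagid 3, and inserts the new entry
	 * before that slot.
	 */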

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname *tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname *p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				_FREE(p, M_TEMP);
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

#if PF_ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag_unref(&pf_qids, (u_int16_t)qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs;
	struct pf_altq *altq;
	int err, error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == '\0') {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}

	altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	ifq = &ifp->if_snd;
	IFCQ_LOCK(ifq);
	if (IFCQ_ALTQ(ifq)->altq_type != ALTQT_NONE)
		error = altq_enable(IFCQ_ALTQ(ifq));

	/* set or clear tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
		struct tb_profile tb = { 0, 0, 0 };

		if (altq->aflags & PF_ALTQF_TBR) {
			if (altq->bwtype != PF_ALTQ_BW_ABSOLUTE &&
			    altq->bwtype != PF_ALTQ_BW_PERCENT) {
				error = EINVAL;
			} else {
				if (altq->bwtype == PF_ALTQ_BW_ABSOLUTE)
					tb.rate = altq->ifbandwidth;
				else
					tb.percent = altq->ifbandwidth;
				tb.depth = altq->tbrsize;
				error = ifclassq_tbr_set(ifq, &tb, TRUE);
			}
		} else if (IFCQ_TBR_IS_ENABLED(ifq)) {
			error = ifclassq_tbr_set(ifq, &tb, TRUE);
		}
	}
	IFCQ_UNLOCK(ifq);

	return (error);
}

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	int error;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	ifq = &ifp->if_snd;
	IFCQ_LOCK(ifq);
	if (altq->altq_disc != IFCQ_ALTQ(ifq)->altq_disc) {
		IFCQ_UNLOCK(ifq);
		return (0);
	}

	error = altq_disable(IFCQ_ALTQ(ifq));

	if (error == 0 && IFCQ_TBR_IS_ENABLED(ifq)) {
		/* clear tokenbucket regulator */
		struct tb_profile tb = { 0, 0, 0 };
		error = ifclassq_tbr_set(ifq, &tb, TRUE);
	}
	IFCQ_UNLOCK(ifq);

	return (error);
}

static void
pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
{
	bcopy(src, dst, sizeof (struct pf_altq));

	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->parent[sizeof (dst->parent) - 1] = '\0';
	dst->altq_disc = NULL;
	dst->entries.tqe_next = NULL;
	dst->entries.tqe_prev = NULL;
}

static void
pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
{
	struct pf_altq pa;

	bcopy(src, &pa, sizeof (struct pf_altq));
	pa.altq_disc = NULL;
	pa.entries.tqe_next = NULL;
	pa.entries.tqe_prev = NULL;
	bcopy(&pa, dst, sizeof (struct pf_altq));
}
#endif /* PF_ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}

static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
	int minordev)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->label[sizeof (dst->label) - 1] = '\0';
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';

	dst->cuid = kauth_cred_getuid(p->p_ucred);
	dst->cpid = p->p_pid;

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM)
		dst->rule_flag |= PFRULE_PFM;
}

static void
pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
}

static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
	struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}

static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
	struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}

static void
pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->kif = NULL;
}

static void
pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->kif = NULL;
}

static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}

static void
pf_start(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}

static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}

1604 | static int | |
1605 | pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) | |
1606 | { | |
1607 | #pragma unused(dev) | |
1608 | int p64 = proc_is64bit(p); | |
1609 | int error = 0; | |
1610 | int minordev = minor(dev); | |
1611 | ||
1612 | if (kauth_cred_issuser(kauth_cred_get()) == 0) | |
1613 | return (EPERM); | |
1614 | ||
1615 | /* XXX keep in sync with switch() below */ | |
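	/*
	 * With securelevel > 1 only inspection-style ioctls are allowed
	 * through; table-modifying requests pass only when marked
	 * PFR_FLAG_DUMMY (no-op), and everything else fails with EPERM.
	 */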
1616 | if (securelevel > 1) | |
1617 | switch (cmd) { | |
1618 | case DIOCGETRULES: | |
1619 | case DIOCGETRULE: | |
1620 | case DIOCGETADDRS: | |
1621 | case DIOCGETADDR: | |
1622 | case DIOCGETSTATE: | |
1623 | case DIOCSETSTATUSIF: | |
1624 | case DIOCGETSTATUS: | |
1625 | case DIOCCLRSTATUS: | |
1626 | case DIOCNATLOOK: | |
1627 | case DIOCSETDEBUG: | |
1628 | case DIOCGETSTATES: | |
1629 | case DIOCINSERTRULE: | |
1630 | case DIOCDELETERULE: | |
1631 | case DIOCGETTIMEOUT: | |
1632 | case DIOCCLRRULECTRS: | |
1633 | case DIOCGETLIMIT: | |
1634 | case DIOCGETALTQS: | |
1635 | case DIOCGETALTQ: | |
1636 | case DIOCGETQSTATS: | |
1637 | case DIOCGETRULESETS: | |
1638 | case DIOCGETRULESET: | |
1639 | case DIOCRGETTABLES: | |
1640 | case DIOCRGETTSTATS: | |
1641 | case DIOCRCLRTSTATS: | |
1642 | case DIOCRCLRADDRS: | |
1643 | case DIOCRADDADDRS: | |
1644 | case DIOCRDELADDRS: | |
1645 | case DIOCRSETADDRS: | |
1646 | case DIOCRGETADDRS: | |
1647 | case DIOCRGETASTATS: | |
1648 | case DIOCRCLRASTATS: | |
1649 | case DIOCRTSTADDRS: | |
1650 | case DIOCOSFPGET: | |
1651 | case DIOCGETSRCNODES: | |
1652 | case DIOCCLRSRCNODES: | |
1653 | case DIOCIGETIFACES: | |
1654 | case DIOCGIFSPEED: | |
1655 | case DIOCSETIFFLAG: | |
1656 | case DIOCCLRIFFLAG: | |
1657 | break; | |
1658 | case DIOCRCLRTABLES: | |
1659 | case DIOCRADDTABLES: | |
1660 | case DIOCRDELTABLES: | |
1661 | case DIOCRSETTFLAGS: { | |
1662 | int pfrio_flags; | |
1663 | ||
1664 | bcopy(&((struct pfioc_table *)(void *)addr)-> | |
1665 | pfrio_flags, &pfrio_flags, sizeof (pfrio_flags)); | |
1666 | ||
1667 | if (pfrio_flags & PFR_FLAG_DUMMY) | |
1668 | break; /* dummy operation ok */ | |
1669 | return (EPERM); | |
1670 | } | |
1671 | default: | |
1672 | return (EPERM); | |
1673 | } | |
1674 | ||
1675 | if (!(flags & FWRITE)) | |
1676 | switch (cmd) { | |
1677 | case DIOCSTART: | |
1678 | case DIOCSTARTREF: | |
1679 | case DIOCSTOP: | |
1680 | case DIOCSTOPREF: | |
1681 | case DIOCGETSTARTERS: | |
1682 | case DIOCGETRULES: | |
1683 | case DIOCGETADDRS: | |
1684 | case DIOCGETADDR: | |
1685 | case DIOCGETSTATE: | |
1686 | case DIOCGETSTATUS: | |
1687 | case DIOCGETSTATES: | |
1688 | case DIOCINSERTRULE: | |
1689 | case DIOCDELETERULE: | |
1690 | case DIOCGETTIMEOUT: | |
1691 | case DIOCGETLIMIT: | |
1692 | case DIOCGETALTQS: | |
1693 | case DIOCGETALTQ: | |
1694 | case DIOCGETQSTATS: | |
1695 | case DIOCGETRULESETS: | |
1696 | case DIOCGETRULESET: | |
1697 | case DIOCNATLOOK: | |
1698 | case DIOCRGETTABLES: | |
1699 | case DIOCRGETTSTATS: | |
1700 | case DIOCRGETADDRS: | |
1701 | case DIOCRGETASTATS: | |
1702 | case DIOCRTSTADDRS: | |
1703 | case DIOCOSFPGET: | |
1704 | case DIOCGETSRCNODES: | |
1705 | case DIOCIGETIFACES: | |
1706 | case DIOCGIFSPEED: | |
1707 | break; | |
1708 | case DIOCRCLRTABLES: | |
1709 | case DIOCRADDTABLES: | |
1710 | case DIOCRDELTABLES: | |
1711 | case DIOCRCLRTSTATS: | |
1712 | case DIOCRCLRADDRS: | |
1713 | case DIOCRADDADDRS: | |
1714 | case DIOCRDELADDRS: | |
1715 | case DIOCRSETADDRS: | |
1716 | case DIOCRSETTFLAGS: { | |
1717 | int pfrio_flags; | |
1718 | ||
1719 | bcopy(&((struct pfioc_table *)(void *)addr)-> | |
1720 | pfrio_flags, &pfrio_flags, sizeof (pfrio_flags)); | |
1721 | ||
1722 | if (pfrio_flags & PFR_FLAG_DUMMY) { | |
1723 | flags |= FWRITE; /* need write lock for dummy */ | |
1724 | break; /* dummy operation ok */ | |
1725 | } | |
1726 | return (EACCES); | |
1727 | } | |
1728 | case DIOCGETRULE: { | |
1729 | u_int32_t action; | |
1730 | ||
1731 | bcopy(&((struct pfioc_rule *)(void *)addr)->action, | |
1732 | &action, sizeof (action)); | |
1733 | ||
1734 | if (action == PF_GET_CLR_CNTR) | |
1735 | return (EACCES); | |
1736 | break; | |
1737 | } | |
1738 | default: | |
1739 | return (EACCES); | |
1740 | } | |
1741 | ||
1742 | #if PF_ALTQ | |
1743 | switch (cmd) { | |
1744 | case DIOCSTARTALTQ: | |
1745 | case DIOCSTOPALTQ: | |
1746 | case DIOCADDALTQ: | |
1747 | case DIOCGETALTQS: | |
1748 | case DIOCGETALTQ: | |
1749 | case DIOCCHANGEALTQ: | |
1750 | case DIOCGETQSTATS: | |
1751 | /* fail if ALTQ is disabled */ | |
1752 | if (!altq_allowed) | |
1753 | return (ENODEV); | |
1754 | break; | |
1755 | } | |
1756 | #endif /* PF_ALTQ */ | |
1757 | ||
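	/*
	 * Locking model for the command dispatch below: callers that
	 * opened the device for writing take the perimeter rwlock
	 * exclusive, read-only callers take it shared (ioctls that only
	 * need FWRITE for DUMMY table operations were upgraded above),
	 * and the pf_lock mutex is held across the entire switch either
	 * way.
	 */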
1758 | if (flags & FWRITE) | |
1759 | lck_rw_lock_exclusive(pf_perim_lock); | |
1760 | else | |
1761 | lck_rw_lock_shared(pf_perim_lock); | |
1762 | ||
1763 | lck_mtx_lock(pf_lock); | |
1764 | ||
1765 | switch (cmd) { | |
1766 | ||
1767 | case DIOCSTART: | |
1768 | if (pf_status.running) { | |
1769 | /* | |
1770 | * Increment the reference for a simple -e enable, so | |
1771 | * that even if other processes drop their references, | |
1772 | * pf will still be available to processes that turned | |
1773 | * it on without taking a reference | |
1774 | */ | |
1775 | if (nr_tokens == pf_enabled_ref_count) { | |
1776 | pf_enabled_ref_count++; | |
1777 | VERIFY(pf_enabled_ref_count != 0); | |
1778 | } | |
1779 | error = EEXIST; | |
1780 | } else if (pf_purge_thread == NULL) { | |
1781 | error = ENOMEM; | |
1782 | } else { | |
1783 | pf_start(); | |
1784 | pf_enabled_ref_count++; | |
1785 | VERIFY(pf_enabled_ref_count != 0); | |
1786 | } | |
1787 | break; | |
1788 | ||
1789 | case DIOCSTARTREF: /* u_int64_t */ | |
1790 | if (pf_purge_thread == NULL) { | |
1791 | error = ENOMEM; | |
1792 | } else { | |
1793 | u_int64_t token; | |
1794 | ||
1795 | /* small enough to be on stack */ | |
1796 | if ((token = generate_token(p)) != 0) { | |
1797 | if (pf_is_enabled == 0) { | |
1798 | pf_start(); | |
1799 | } | |
1800 | pf_enabled_ref_count++; | |
1801 | VERIFY(pf_enabled_ref_count != 0); | |
1802 | } else { | |
1803 | error = ENOMEM; | |
1804 | DPFPRINTF(PF_DEBUG_URGENT, | |
1805 | ("pf: unable to generate token\n")); | |
1806 | } | |
1807 | bcopy(&token, addr, sizeof (token)); | |
1808 | } | |
1809 | break; | |
1810 | ||
1811 | case DIOCSTOP: | |
1812 | if (!pf_status.running) { | |
1813 | error = ENOENT; | |
1814 | } else { | |
1815 | pf_stop(); | |
1816 | pf_enabled_ref_count = 0; | |
1817 | invalidate_all_tokens(); | |
1818 | } | |
1819 | break; | |
1820 | ||
1821 | case DIOCSTOPREF: /* struct pfioc_remove_token */ | |
1822 | if (!pf_status.running) { | |
1823 | error = ENOENT; | |
1824 | } else { | |
1825 | struct pfioc_remove_token pfrt; | |
1826 | ||
1827 | /* small enough to be on stack */ | |
1828 | bcopy(addr, &pfrt, sizeof (pfrt)); | |
1829 | if ((error = remove_token(&pfrt)) == 0) { | |
1830 | VERIFY(pf_enabled_ref_count != 0); | |
1831 | pf_enabled_ref_count--; | |
1832 | /* return currently held references */ | |
1833 | pfrt.refcount = pf_enabled_ref_count; | |
1834 | DPFPRINTF(PF_DEBUG_MISC, | |
1835 | ("pf: enabled refcount decremented\n")); | |
1836 | } else { | |
1837 | error = EINVAL; | |
1838 | DPFPRINTF(PF_DEBUG_URGENT, | |
1839 | ("pf: token mismatch\n")); | |
1840 | } | |
1841 | bcopy(&pfrt, addr, sizeof (pfrt)); | |
1842 | ||
1843 | if (error == 0 && pf_enabled_ref_count == 0) | |
1844 | pf_stop(); | |
1845 | } | |
1846 | break; | |
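		/*
		 * Hedged usage sketch (user space; field names per
		 * <net/pfvar.h>, the token member name below is
		 * illustrative only):
		 *
		 *	u_int64_t token;
		 *	ioctl(pf_fd, DIOCSTARTREF, &token);  // enable, get token
		 *	...
		 *	struct pfioc_remove_token rt;
		 *	rt.token_value = token;              // assumed field name
		 *	ioctl(pf_fd, DIOCSTOPREF, &rt);      // drop this reference
		 *
		 * pf itself stops only when the last reference is released
		 * (pf_enabled_ref_count reaches 0 above).
		 */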
1847 | ||
1848 | case DIOCGETSTARTERS: { /* struct pfioc_tokens */ | |
1849 | PFIOCX_STRUCT_DECL(pfioc_tokens); | |
1850 | ||
1851 | PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;); | |
1852 | error = pfioctl_ioc_tokens(cmd, | |
1853 | PFIOCX_STRUCT_ADDR32(pfioc_tokens), | |
1854 | PFIOCX_STRUCT_ADDR64(pfioc_tokens), p); | |
1855 | PFIOCX_STRUCT_END(pfioc_tokens, addr); | |
1856 | break; | |
1857 | } | |
1858 | ||
1859 | case DIOCADDRULE: /* struct pfioc_rule */ | |
1860 | case DIOCGETRULES: /* struct pfioc_rule */ | |
1861 | case DIOCGETRULE: /* struct pfioc_rule */ | |
1862 | case DIOCCHANGERULE: /* struct pfioc_rule */ | |
1863 | case DIOCINSERTRULE: /* struct pfioc_rule */ | |
1864 | case DIOCDELETERULE: { /* struct pfioc_rule */ | |
1865 | struct pfioc_rule *pr = NULL; | |
1866 | ||
1867 | PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;); | |
1868 | error = pfioctl_ioc_rule(cmd, minordev, pr, p); | |
1869 | PFIOC_STRUCT_END(pr, addr); | |
1870 | break; | |
1871 | } | |
1872 | ||
1873 | case DIOCCLRSTATES: /* struct pfioc_state_kill */ | |
1874 | case DIOCKILLSTATES: { /* struct pfioc_state_kill */ | |
1875 | struct pfioc_state_kill *psk = NULL; | |
1876 | ||
1877 | PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;); | |
1878 | error = pfioctl_ioc_state_kill(cmd, psk, p); | |
1879 | PFIOC_STRUCT_END(psk, addr); | |
1880 | break; | |
1881 | } | |
1882 | ||
1883 | case DIOCADDSTATE: /* struct pfioc_state */ | |
1884 | case DIOCGETSTATE: { /* struct pfioc_state */ | |
1885 | struct pfioc_state *ps = NULL; | |
1886 | ||
1887 | PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;); | |
1888 | error = pfioctl_ioc_state(cmd, ps, p); | |
1889 | PFIOC_STRUCT_END(ps, addr); | |
1890 | break; | |
1891 | } | |
1892 | ||
1893 | case DIOCGETSTATES: { /* struct pfioc_states */ | |
1894 | PFIOCX_STRUCT_DECL(pfioc_states); | |
1895 | ||
1896 | PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;); | |
1897 | error = pfioctl_ioc_states(cmd, | |
1898 | PFIOCX_STRUCT_ADDR32(pfioc_states), | |
1899 | PFIOCX_STRUCT_ADDR64(pfioc_states), p); | |
1900 | PFIOCX_STRUCT_END(pfioc_states, addr); | |
1901 | break; | |
1902 | } | |
1903 | ||
1904 | case DIOCGETSTATUS: { /* struct pf_status */ | |
1905 | struct pf_status *s = NULL; | |
1906 | ||
1907 | PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;); | |
1908 | pfi_update_status(s->ifname, s); | |
1909 | PFIOC_STRUCT_END(s, addr); | |
1910 | break; | |
1911 | } | |
1912 | ||
1913 | case DIOCSETSTATUSIF: { /* struct pfioc_if */ | |
1914 | struct pfioc_if *pi = (struct pfioc_if *)(void *)addr; | |
1915 | ||
1916 | /* OK for unaligned accesses */ | |
1917 | if (pi->ifname[0] == 0) { | |
1918 | bzero(pf_status.ifname, IFNAMSIZ); | |
1919 | break; | |
1920 | } | |
1921 | strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); | |
1922 | break; | |
1923 | } | |
1924 | ||
1925 | case DIOCCLRSTATUS: { | |
1926 | bzero(pf_status.counters, sizeof (pf_status.counters)); | |
1927 | bzero(pf_status.fcounters, sizeof (pf_status.fcounters)); | |
1928 | bzero(pf_status.scounters, sizeof (pf_status.scounters)); | |
1929 | pf_status.since = pf_calendar_time_second(); | |
1930 | if (*pf_status.ifname) | |
1931 | pfi_update_status(pf_status.ifname, NULL); | |
1932 | break; | |
1933 | } | |
1934 | ||
1935 | case DIOCNATLOOK: { /* struct pfioc_natlook */ | |
1936 | struct pfioc_natlook *pnl = NULL; | |
1937 | ||
1938 | PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;); | |
1939 | error = pfioctl_ioc_natlook(cmd, pnl, p); | |
1940 | PFIOC_STRUCT_END(pnl, addr); | |
1941 | break; | |
1942 | } | |
1943 | ||
1944 | case DIOCSETTIMEOUT: /* struct pfioc_tm */ | |
1945 | case DIOCGETTIMEOUT: { /* struct pfioc_tm */ | |
1946 | struct pfioc_tm pt; | |
1947 | ||
1948 | /* small enough to be on stack */ | |
1949 | bcopy(addr, &pt, sizeof (pt)); | |
1950 | error = pfioctl_ioc_tm(cmd, &pt, p); | |
1951 | bcopy(&pt, addr, sizeof (pt)); | |
1952 | break; | |
1953 | } | |
1954 | ||
1955 | case DIOCGETLIMIT: /* struct pfioc_limit */ | |
1956 | case DIOCSETLIMIT: { /* struct pfioc_limit */ | |
1957 | struct pfioc_limit pl; | |
1958 | ||
1959 | /* small enough to be on stack */ | |
1960 | bcopy(addr, &pl, sizeof (pl)); | |
1961 | error = pfioctl_ioc_limit(cmd, &pl, p); | |
1962 | bcopy(&pl, addr, sizeof (pl)); | |
1963 | break; | |
1964 | } | |
1965 | ||
1966 | case DIOCSETDEBUG: { /* u_int32_t */ | |
1967 | bcopy(addr, &pf_status.debug, sizeof (u_int32_t)); | |
1968 | break; | |
1969 | } | |
1970 | ||
1971 | case DIOCCLRRULECTRS: { | |
1972 | /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ | |
1973 | struct pf_ruleset *ruleset = &pf_main_ruleset; | |
1974 | struct pf_rule *rule; | |
1975 | ||
1976 | TAILQ_FOREACH(rule, | |
1977 | ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { | |
1978 | rule->evaluations = 0; | |
1979 | rule->packets[0] = rule->packets[1] = 0; | |
1980 | rule->bytes[0] = rule->bytes[1] = 0; | |
1981 | } | |
1982 | break; | |
1983 | } | |
1984 | ||
1985 | case DIOCGIFSPEED: { | |
1986 | struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr; | |
1987 | struct pf_ifspeed ps; | |
1988 | struct ifnet *ifp; | |
1989 | u_int64_t baudrate; | |
1990 | ||
1991 | if (psp->ifname[0] != '\0') { | |
1992 | /* Can we completely trust user-land? */ | |
1993 | strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); | |
1994 | ps.ifname[IFNAMSIZ - 1] = '\0'; | |
1995 | ifp = ifunit(ps.ifname); | |
1996 | if (ifp != NULL) { | |
1997 | baudrate = ifp->if_output_bw.max_bw; | |
1998 | bcopy(&baudrate, &psp->baudrate, | |
1999 | sizeof (baudrate)); | |
2000 | } else { | |
2001 | error = EINVAL; | |
2002 | } | |
2003 | } else { | |
2004 | error = EINVAL; | |
2005 | } | |
2006 | break; | |
2007 | } | |
2008 | ||
2009 | #if PF_ALTQ | |
2010 | case DIOCSTARTALTQ: { | |
2011 | struct pf_altq *altq; | |
2012 | ||
2013 | VERIFY(altq_allowed); | |
2014 | /* enable all altq interfaces on active list */ | |
2015 | TAILQ_FOREACH(altq, pf_altqs_active, entries) { | |
2016 | if (altq->qname[0] == '\0') { | |
2017 | error = pf_enable_altq(altq); | |
2018 | if (error != 0) | |
2019 | break; | |
2020 | } | |
2021 | } | |
2022 | if (error == 0) | |
2023 | pf_altq_running = 1; | |
2024 | DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); | |
2025 | break; | |
2026 | } | |
2027 | ||
2028 | case DIOCSTOPALTQ: { | |
2029 | struct pf_altq *altq; | |
2030 | ||
2031 | VERIFY(altq_allowed); | |
2032 | /* disable all altq interfaces on active list */ | |
2033 | TAILQ_FOREACH(altq, pf_altqs_active, entries) { | |
2034 | if (altq->qname[0] == '\0') { | |
2035 | error = pf_disable_altq(altq); | |
2036 | if (error != 0) | |
2037 | break; | |
2038 | } | |
2039 | } | |
2040 | if (error == 0) | |
2041 | pf_altq_running = 0; | |
2042 | DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); | |
2043 | break; | |
2044 | } | |
2045 | ||
2046 | case DIOCADDALTQ: { /* struct pfioc_altq */ | |
2047 | struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr; | |
2048 | struct pf_altq *altq, *a; | |
2049 | u_int32_t ticket; | |
2050 | ||
2051 | VERIFY(altq_allowed); | |
2052 | bcopy(&pa->ticket, &ticket, sizeof (ticket)); | |
2053 | if (ticket != ticket_altqs_inactive) { | |
2054 | error = EBUSY; | |
2055 | break; | |
2056 | } | |
2057 | altq = pool_get(&pf_altq_pl, PR_WAITOK); | |
2058 | if (altq == NULL) { | |
2059 | error = ENOMEM; | |
2060 | break; | |
2061 | } | |
2062 | pf_altq_copyin(&pa->altq, altq); | |
2063 | ||
2064 | /* | |
2065 | * if this is for a queue, find the discipline and | |
2066 | * copy the necessary fields | |
2067 | */ | |
2068 | if (altq->qname[0] != '\0') { | |
2069 | if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { | |
2070 | error = EBUSY; | |
2071 | pool_put(&pf_altq_pl, altq); | |
2072 | break; | |
2073 | } | |
2074 | altq->altq_disc = NULL; | |
2075 | TAILQ_FOREACH(a, pf_altqs_inactive, entries) { | |
2076 | if (strncmp(a->ifname, altq->ifname, | |
2077 | IFNAMSIZ) == 0 && a->qname[0] == '\0') { | |
2078 | altq->altq_disc = a->altq_disc; | |
2079 | break; | |
2080 | } | |
2081 | } | |
2082 | } | |
2083 | ||
2084 | error = altq_add(altq); | |
2085 | if (error) { | |
2086 | pool_put(&pf_altq_pl, altq); | |
2087 | break; | |
2088 | } | |
2089 | ||
2090 | TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); | |
2091 | pf_altq_copyout(altq, &pa->altq); | |
2092 | break; | |
2093 | } | |
2094 | ||
2095 | case DIOCGETALTQS: { | |
2096 | struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr; | |
2097 | struct pf_altq *altq; | |
2098 | u_int32_t nr; | |
2099 | ||
2100 | VERIFY(altq_allowed); | |
2101 | nr = 0; | |
2102 | TAILQ_FOREACH(altq, pf_altqs_active, entries) | |
2103 | nr++; | |
2104 | bcopy(&nr, &pa->nr, sizeof (nr)); | |
2105 | bcopy(&ticket_altqs_active, &pa->ticket, sizeof (pa->ticket)); | |
2106 | break; | |
2107 | } | |
2108 | ||
2109 | case DIOCGETALTQ: { | |
2110 | struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr; | |
2111 | struct pf_altq *altq; | |
2112 | u_int32_t nr, pa_nr, ticket; | |
2113 | ||
2114 | VERIFY(altq_allowed); | |
2115 | bcopy(&pa->ticket, &ticket, sizeof (ticket)); | |
2116 | if (ticket != ticket_altqs_active) { | |
2117 | error = EBUSY; | |
2118 | break; | |
2119 | } | |
2120 | bcopy(&pa->nr, &pa_nr, sizeof (pa_nr)); | |
2121 | nr = 0; | |
2122 | altq = TAILQ_FIRST(pf_altqs_active); | |
2123 | while ((altq != NULL) && (nr < pa_nr)) { | |
2124 | altq = TAILQ_NEXT(altq, entries); | |
2125 | nr++; | |
2126 | } | |
2127 | if (altq == NULL) { | |
2128 | error = EBUSY; | |
2129 | break; | |
2130 | } | |
2131 | pf_altq_copyout(altq, &pa->altq); | |
2132 | break; | |
2133 | } | |
2134 | ||
2135 | case DIOCCHANGEALTQ: | |
2136 | VERIFY(altq_allowed); | |
2137 | /* CHANGEALTQ not supported yet! */ | |
2138 | error = ENODEV; | |
2139 | break; | |
2140 | ||
2141 | case DIOCGETQSTATS: { | |
2142 | struct pfioc_qstats *pq = (struct pfioc_qstats *)(void *)addr; | |
2143 | struct pf_altq *altq; | |
2144 | u_int32_t nr, pq_nr, ticket; | |
2145 | int nbytes; | |
2146 | ||
2147 | VERIFY(altq_allowed); | |
2148 | bcopy(&pq->ticket, &ticket, sizeof (ticket)); | |
2149 | if (ticket != ticket_altqs_active) { | |
2150 | error = EBUSY; | |
2151 | break; | |
2152 | } | |
2153 | bcopy(&pq->nr, &pq_nr, sizeof (pq_nr)); | |
2154 | nr = 0; | |
2155 | altq = TAILQ_FIRST(pf_altqs_active); | |
2156 | while ((altq != NULL) && (nr < pq_nr)) { | |
2157 | altq = TAILQ_NEXT(altq, entries); | |
2158 | nr++; | |
2159 | } | |
2160 | if (altq == NULL) { | |
2161 | error = EBUSY; | |
2162 | break; | |
2163 | } | |
2164 | bcopy(&pq->nbytes, &nbytes, sizeof (nbytes)); | |
2165 | error = altq_getqstats(altq, pq->buf, &nbytes); | |
2166 | if (error == 0) { | |
2167 | pq->scheduler = altq->scheduler; | |
2168 | bcopy(&nbytes, &pq->nbytes, sizeof (nbytes)); | |
2169 | } | |
2170 | break; | |
2171 | } | |
2172 | #endif /* PF_ALTQ */ | |
2173 | ||
2174 | case DIOCBEGINADDRS: /* struct pfioc_pooladdr */ | |
2175 | case DIOCADDADDR: /* struct pfioc_pooladdr */ | |
2176 | case DIOCGETADDRS: /* struct pfioc_pooladdr */ | |
2177 | case DIOCGETADDR: /* struct pfioc_pooladdr */ | |
2178 | case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */ | |
2179 | struct pfioc_pooladdr *pp = NULL; | |
2180 | ||
2181 | PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;); | |
2182 | error = pfioctl_ioc_pooladdr(cmd, pp, p); | |
2183 | PFIOC_STRUCT_END(pp, addr); | |
2184 | break; | |
2185 | } | |
2186 | ||
2187 | case DIOCGETRULESETS: /* struct pfioc_ruleset */ | |
2188 | case DIOCGETRULESET: { /* struct pfioc_ruleset */ | |
2189 | struct pfioc_ruleset *pr = NULL; | |
2190 | ||
2191 | PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;); | |
2192 | error = pfioctl_ioc_ruleset(cmd, pr, p); | |
2193 | PFIOC_STRUCT_END(pr, addr); | |
2194 | break; | |
2195 | } | |
2196 | ||
2197 | case DIOCRCLRTABLES: /* struct pfioc_table */ | |
2198 | case DIOCRADDTABLES: /* struct pfioc_table */ | |
2199 | case DIOCRDELTABLES: /* struct pfioc_table */ | |
2200 | case DIOCRGETTABLES: /* struct pfioc_table */ | |
2201 | case DIOCRGETTSTATS: /* struct pfioc_table */ | |
2202 | case DIOCRCLRTSTATS: /* struct pfioc_table */ | |
2203 | case DIOCRSETTFLAGS: /* struct pfioc_table */ | |
2204 | case DIOCRCLRADDRS: /* struct pfioc_table */ | |
2205 | case DIOCRADDADDRS: /* struct pfioc_table */ | |
2206 | case DIOCRDELADDRS: /* struct pfioc_table */ | |
2207 | case DIOCRSETADDRS: /* struct pfioc_table */ | |
2208 | case DIOCRGETADDRS: /* struct pfioc_table */ | |
2209 | case DIOCRGETASTATS: /* struct pfioc_table */ | |
2210 | case DIOCRCLRASTATS: /* struct pfioc_table */ | |
2211 | case DIOCRTSTADDRS: /* struct pfioc_table */ | |
2212 | case DIOCRINADEFINE: { /* struct pfioc_table */ | |
2213 | PFIOCX_STRUCT_DECL(pfioc_table); | |
2214 | ||
2215 | PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;); | |
2216 | error = pfioctl_ioc_table(cmd, | |
2217 | PFIOCX_STRUCT_ADDR32(pfioc_table), | |
2218 | PFIOCX_STRUCT_ADDR64(pfioc_table), p); | |
2219 | PFIOCX_STRUCT_END(pfioc_table, addr); | |
2220 | break; | |
2221 | } | |
2222 | ||
2223 | case DIOCOSFPADD: /* struct pf_osfp_ioctl */ | |
2224 | case DIOCOSFPGET: { /* struct pf_osfp_ioctl */ | |
2225 | struct pf_osfp_ioctl *io = NULL; | |
2226 | ||
2227 | PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;); | |
2228 | if (cmd == DIOCOSFPADD) { | |
2229 | error = pf_osfp_add(io); | |
2230 | } else { | |
2231 | VERIFY(cmd == DIOCOSFPGET); | |
2232 | error = pf_osfp_get(io); | |
2233 | } | |
2234 | PFIOC_STRUCT_END(io, addr); | |
2235 | break; | |
2236 | } | |
2237 | ||
2238 | case DIOCXBEGIN: /* struct pfioc_trans */ | |
2239 | case DIOCXROLLBACK: /* struct pfioc_trans */ | |
2240 | case DIOCXCOMMIT: { /* struct pfioc_trans */ | |
2241 | PFIOCX_STRUCT_DECL(pfioc_trans); | |
2242 | ||
2243 | PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;); | |
2244 | error = pfioctl_ioc_trans(cmd, | |
2245 | PFIOCX_STRUCT_ADDR32(pfioc_trans), | |
2246 | PFIOCX_STRUCT_ADDR64(pfioc_trans), p); | |
2247 | PFIOCX_STRUCT_END(pfioc_trans, addr); | |
2248 | break; | |
2249 | } | |
2250 | ||
2251 | case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */ | |
2252 | PFIOCX_STRUCT_DECL(pfioc_src_nodes); | |
2253 | ||
2254 | PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes, | |
2255 | error = ENOMEM; break;); | |
2256 | error = pfioctl_ioc_src_nodes(cmd, | |
2257 | PFIOCX_STRUCT_ADDR32(pfioc_src_nodes), | |
2258 | PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p); | |
2259 | PFIOCX_STRUCT_END(pfioc_src_nodes, addr); | |
2260 | break; | |
2261 | } | |
2262 | ||
2263 | case DIOCCLRSRCNODES: { | |
2264 | struct pf_src_node *n; | |
2265 | struct pf_state *state; | |
2266 | ||
2267 | RB_FOREACH(state, pf_state_tree_id, &tree_id) { | |
2268 | state->src_node = NULL; | |
2269 | state->nat_src_node = NULL; | |
2270 | } | |
2271 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { | |
2272 | n->expire = 1; | |
2273 | n->states = 0; | |
2274 | } | |
2275 | pf_purge_expired_src_nodes(); | |
2276 | pf_status.src_nodes = 0; | |
2277 | break; | |
2278 | } | |
2279 | ||
2280 | case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */ | |
2281 | struct pfioc_src_node_kill *psnk = NULL; | |
2282 | ||
2283 | PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;); | |
2284 | error = pfioctl_ioc_src_node_kill(cmd, psnk, p); | |
2285 | PFIOC_STRUCT_END(psnk, addr); | |
2286 | break; | |
2287 | } | |
2288 | ||
2289 | case DIOCSETHOSTID: { /* u_int32_t */ | |
2290 | u_int32_t hid; | |
2291 | ||
2292 | /* small enough to be on stack */ | |
2293 | bcopy(addr, &hid, sizeof (hid)); | |
2294 | if (hid == 0) | |
2295 | pf_status.hostid = random(); | |
2296 | else | |
2297 | pf_status.hostid = hid; | |
2298 | break; | |
2299 | } | |
2300 | ||
2301 | case DIOCOSFPFLUSH: | |
2302 | pf_osfp_flush(); | |
2303 | break; | |
2304 | ||
2305 | case DIOCIGETIFACES: /* struct pfioc_iface */ | |
2306 | case DIOCSETIFFLAG: /* struct pfioc_iface */ | |
2307 | case DIOCCLRIFFLAG: { /* struct pfioc_iface */ | |
2308 | PFIOCX_STRUCT_DECL(pfioc_iface); | |
2309 | ||
2310 | PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;); | |
2311 | error = pfioctl_ioc_iface(cmd, | |
2312 | PFIOCX_STRUCT_ADDR32(pfioc_iface), | |
2313 | PFIOCX_STRUCT_ADDR64(pfioc_iface), p); | |
2314 | PFIOCX_STRUCT_END(pfioc_iface, addr); | |
2315 | break; | |
2316 | } | |
2317 | ||
2318 | default: | |
2319 | error = ENODEV; | |
2320 | break; | |
2321 | } | |
2322 | ||
2323 | lck_mtx_unlock(pf_lock); | |
2324 | lck_rw_done(pf_perim_lock); | |
2325 | ||
2326 | return (error); | |
2327 | } | |
2328 | ||
2329 | static int | |
2330 | pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, | |
2331 | struct pfioc_table_64 *io64, struct proc *p) | |
2332 | { | |
2333 | int p64 = proc_is64bit(p); | |
2334 | int error = 0; | |
2335 | ||
2336 | if (!p64) | |
2337 | goto struct32; | |
2338 | ||
2339 | /* | |
2340 | * 64-bit structure processing | |
2341 | */ | |
2342 | switch (cmd) { | |
2343 | case DIOCRCLRTABLES: | |
2344 | if (io64->pfrio_esize != 0) { | |
2345 | error = ENODEV; | |
2346 | break; | |
2347 | } | |
2348 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2349 | error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel, | |
2350 | io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2351 | break; | |
2352 | ||
2353 | case DIOCRADDTABLES: | |
2354 | if (io64->pfrio_esize != sizeof (struct pfr_table)) { | |
2355 | error = ENODEV; | |
2356 | break; | |
2357 | } | |
2358 | error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size, | |
2359 | &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2360 | break; | |
2361 | ||
2362 | case DIOCRDELTABLES: | |
2363 | if (io64->pfrio_esize != sizeof (struct pfr_table)) { | |
2364 | error = ENODEV; | |
2365 | break; | |
2366 | } | |
2367 | error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size, | |
2368 | &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2369 | break; | |
2370 | ||
2371 | case DIOCRGETTABLES: | |
2372 | if (io64->pfrio_esize != sizeof (struct pfr_table)) { | |
2373 | error = ENODEV; | |
2374 | break; | |
2375 | } | |
2376 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2377 | error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer, | |
2378 | &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2379 | break; | |
2380 | ||
2381 | case DIOCRGETTSTATS: | |
2382 | if (io64->pfrio_esize != sizeof (struct pfr_tstats)) { | |
2383 | error = ENODEV; | |
2384 | break; | |
2385 | } | |
2386 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2387 | error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer, | |
2388 | &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2389 | break; | |
2390 | ||
2391 | case DIOCRCLRTSTATS: | |
2392 | if (io64->pfrio_esize != sizeof (struct pfr_table)) { | |
2393 | error = ENODEV; | |
2394 | break; | |
2395 | } | |
2396 | error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size, | |
2397 | &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2398 | break; | |
2399 | ||
2400 | case DIOCRSETTFLAGS: | |
2401 | if (io64->pfrio_esize != sizeof (struct pfr_table)) { | |
2402 | error = ENODEV; | |
2403 | break; | |
2404 | } | |
2405 | error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size, | |
2406 | io64->pfrio_setflag, io64->pfrio_clrflag, | |
2407 | &io64->pfrio_nchange, &io64->pfrio_ndel, | |
2408 | io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2409 | break; | |
2410 | ||
2411 | case DIOCRCLRADDRS: | |
2412 | if (io64->pfrio_esize != 0) { | |
2413 | error = ENODEV; | |
2414 | break; | |
2415 | } | |
2416 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2417 | error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel, | |
2418 | io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2419 | break; | |
2420 | ||
2421 | case DIOCRADDADDRS: | |
2422 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2423 | error = ENODEV; | |
2424 | break; | |
2425 | } | |
2426 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2427 | error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer, | |
2428 | io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags | | |
2429 | PFR_FLAG_USERIOCTL); | |
2430 | break; | |
2431 | ||
2432 | case DIOCRDELADDRS: | |
2433 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2434 | error = ENODEV; | |
2435 | break; | |
2436 | } | |
2437 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2438 | error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer, | |
2439 | io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags | | |
2440 | PFR_FLAG_USERIOCTL); | |
2441 | break; | |
2442 | ||
2443 | case DIOCRSETADDRS: | |
2444 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2445 | error = ENODEV; | |
2446 | break; | |
2447 | } | |
2448 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2449 | error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer, | |
2450 | io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd, | |
2451 | &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags | | |
2452 | PFR_FLAG_USERIOCTL, 0); | |
2453 | break; | |
2454 | ||
2455 | case DIOCRGETADDRS: | |
2456 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2457 | error = ENODEV; | |
2458 | break; | |
2459 | } | |
2460 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2461 | error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer, | |
2462 | &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2463 | break; | |
2464 | ||
2465 | case DIOCRGETASTATS: | |
2466 | if (io64->pfrio_esize != sizeof (struct pfr_astats)) { | |
2467 | error = ENODEV; | |
2468 | break; | |
2469 | } | |
2470 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2471 | error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer, | |
2472 | &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2473 | break; | |
2474 | ||
2475 | case DIOCRCLRASTATS: | |
2476 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2477 | error = ENODEV; | |
2478 | break; | |
2479 | } | |
2480 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2481 | error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer, | |
2482 | io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags | | |
2483 | PFR_FLAG_USERIOCTL); | |
2484 | break; | |
2485 | ||
2486 | case DIOCRTSTADDRS: | |
2487 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2488 | error = ENODEV; | |
2489 | break; | |
2490 | } | |
2491 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2492 | error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer, | |
2493 | io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags | | |
2494 | PFR_FLAG_USERIOCTL); | |
2495 | break; | |
2496 | ||
2497 | case DIOCRINADEFINE: | |
2498 | if (io64->pfrio_esize != sizeof (struct pfr_addr)) { | |
2499 | error = ENODEV; | |
2500 | break; | |
2501 | } | |
2502 | pfr_table_copyin_cleanup(&io64->pfrio_table); | |
2503 | error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer, | |
2504 | io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr, | |
2505 | io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2506 | break; | |
2507 | ||
2508 | default: | |
2509 | VERIFY(0); | |
2510 | /* NOTREACHED */ | |
2511 | } | |
2512 | goto done; | |
2513 | ||
2514 | struct32: | |
2515 | /* | |
2516 | * 32-bit structure processing | |
2517 | */ | |
2518 | switch (cmd) { | |
2519 | case DIOCRCLRTABLES: | |
2520 | if (io32->pfrio_esize != 0) { | |
2521 | error = ENODEV; | |
2522 | break; | |
2523 | } | |
2524 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2525 | error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel, | |
2526 | io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2527 | break; | |
2528 | ||
2529 | case DIOCRADDTABLES: | |
2530 | if (io32->pfrio_esize != sizeof (struct pfr_table)) { | |
2531 | error = ENODEV; | |
2532 | break; | |
2533 | } | |
2534 | error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size, | |
2535 | &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2536 | break; | |
2537 | ||
2538 | case DIOCRDELTABLES: | |
2539 | if (io32->pfrio_esize != sizeof (struct pfr_table)) { | |
2540 | error = ENODEV; | |
2541 | break; | |
2542 | } | |
2543 | error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size, | |
2544 | &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2545 | break; | |
2546 | ||
2547 | case DIOCRGETTABLES: | |
2548 | if (io32->pfrio_esize != sizeof (struct pfr_table)) { | |
2549 | error = ENODEV; | |
2550 | break; | |
2551 | } | |
2552 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2553 | error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer, | |
2554 | &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2555 | break; | |
2556 | ||
2557 | case DIOCRGETTSTATS: | |
2558 | if (io32->pfrio_esize != sizeof (struct pfr_tstats)) { | |
2559 | error = ENODEV; | |
2560 | break; | |
2561 | } | |
2562 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2563 | error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer, | |
2564 | &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2565 | break; | |
2566 | ||
2567 | case DIOCRCLRTSTATS: | |
2568 | if (io32->pfrio_esize != sizeof (struct pfr_table)) { | |
2569 | error = ENODEV; | |
2570 | break; | |
2571 | } | |
2572 | error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size, | |
2573 | &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2574 | break; | |
2575 | ||
2576 | case DIOCRSETTFLAGS: | |
2577 | if (io32->pfrio_esize != sizeof (struct pfr_table)) { | |
2578 | error = ENODEV; | |
2579 | break; | |
2580 | } | |
2581 | error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size, | |
2582 | io32->pfrio_setflag, io32->pfrio_clrflag, | |
2583 | &io32->pfrio_nchange, &io32->pfrio_ndel, | |
2584 | io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2585 | break; | |
2586 | ||
2587 | case DIOCRCLRADDRS: | |
2588 | if (io32->pfrio_esize != 0) { | |
2589 | error = ENODEV; | |
2590 | break; | |
2591 | } | |
2592 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2593 | error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel, | |
2594 | io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2595 | break; | |
2596 | ||
2597 | case DIOCRADDADDRS: | |
2598 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2599 | error = ENODEV; | |
2600 | break; | |
2601 | } | |
2602 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2603 | error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer, | |
2604 | io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags | | |
2605 | PFR_FLAG_USERIOCTL); | |
2606 | break; | |
2607 | ||
2608 | case DIOCRDELADDRS: | |
2609 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2610 | error = ENODEV; | |
2611 | break; | |
2612 | } | |
2613 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2614 | error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer, | |
2615 | io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags | | |
2616 | PFR_FLAG_USERIOCTL); | |
2617 | break; | |
2618 | ||
2619 | case DIOCRSETADDRS: | |
2620 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2621 | error = ENODEV; | |
2622 | break; | |
2623 | } | |
2624 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2625 | error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer, | |
2626 | io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd, | |
2627 | &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags | | |
2628 | PFR_FLAG_USERIOCTL, 0); | |
2629 | break; | |
2630 | ||
2631 | case DIOCRGETADDRS: | |
2632 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2633 | error = ENODEV; | |
2634 | break; | |
2635 | } | |
2636 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2637 | error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer, | |
2638 | &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2639 | break; | |
2640 | ||
2641 | case DIOCRGETASTATS: | |
2642 | if (io32->pfrio_esize != sizeof (struct pfr_astats)) { | |
2643 | error = ENODEV; | |
2644 | break; | |
2645 | } | |
2646 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2647 | error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer, | |
2648 | &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2649 | break; | |
2650 | ||
2651 | case DIOCRCLRASTATS: | |
2652 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2653 | error = ENODEV; | |
2654 | break; | |
2655 | } | |
2656 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2657 | error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer, | |
2658 | io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags | | |
2659 | PFR_FLAG_USERIOCTL); | |
2660 | break; | |
2661 | ||
2662 | case DIOCRTSTADDRS: | |
2663 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2664 | error = ENODEV; | |
2665 | break; | |
2666 | } | |
2667 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2668 | error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer, | |
2669 | io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags | | |
2670 | PFR_FLAG_USERIOCTL); | |
2671 | break; | |
2672 | ||
2673 | case DIOCRINADEFINE: | |
2674 | if (io32->pfrio_esize != sizeof (struct pfr_addr)) { | |
2675 | error = ENODEV; | |
2676 | break; | |
2677 | } | |
2678 | pfr_table_copyin_cleanup(&io32->pfrio_table); | |
2679 | error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer, | |
2680 | io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr, | |
2681 | io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL); | |
2682 | break; | |
2683 | ||
2684 | default: | |
2685 | VERIFY(0); | |
2686 | /* NOTREACHED */ | |
2687 | } | |
2688 | ||
2689 | done: | |
2690 | return (error); | |
2691 | } | |
2692 | ||
2693 | static int | |
2694 | pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32, | |
2695 | struct pfioc_tokens_64 *tok64, struct proc *p) | |
2696 | { | |
2697 | struct pfioc_token *tokens; | |
2698 | struct pfioc_kernel_token *entry, *tmp; | |
2699 | user_addr_t token_buf; | |
2700 | int ocnt, cnt, error = 0, p64 = proc_is64bit(p); | |
2701 | char *ptr; | |
2702 | ||
2703 | switch (cmd) { | |
2704 | case DIOCGETSTARTERS: { | |
2705 | int size; | |
2706 | ||
2707 | if (nr_tokens == 0) { | |
2708 | error = ENOENT; | |
2709 | break; | |
2710 | } | |
2711 | ||
2712 | size = sizeof (struct pfioc_token) * nr_tokens; | |
2713 | ocnt = cnt = (p64 ? tok64->size : tok32->size); | |
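		/*
		 * Two-call protocol: a caller that passes size == 0 is only
		 * asking how many bytes it needs; report the full size and
		 * return.  Otherwise copy out as many pfioc_token records
		 * as fit and report the number of bytes actually written
		 * back through size.
		 */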
2714 | if (cnt == 0) { | |
2715 | if (p64) | |
2716 | tok64->size = size; | |
2717 | else | |
2718 | tok32->size = size; | |
2719 | break; | |
2720 | } | |
2721 | ||
2722 | token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf); | |
2723 | tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO); | |
2724 | if (tokens == NULL) { | |
2725 | error = ENOMEM; | |
2726 | break; | |
2727 | } | |
2728 | ||
2729 | ptr = (void *)tokens; | |
2730 | SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) { | |
2731 | struct pfioc_token *t; | |
2732 | ||
2733 | if ((unsigned)cnt < sizeof (*tokens)) | |
2734 | break; /* no more buffer space left */ | |
2735 | ||
2736 | t = (struct pfioc_token *)(void *)ptr; | |
2737 | t->token_value = entry->token.token_value; | |
2738 | t->timestamp = entry->token.timestamp; | |
2739 | t->pid = entry->token.pid; | |
2740 | bcopy(entry->token.proc_name, t->proc_name, | |
2741 | PFTOK_PROCNAME_LEN); | |
2742 | ptr += sizeof (struct pfioc_token); | |
2743 | ||
2744 | cnt -= sizeof (struct pfioc_token); | |
2745 | } | |
2746 | ||
2747 | if (cnt < ocnt) | |
2748 | error = copyout(tokens, token_buf, ocnt - cnt); | |
2749 | ||
2750 | if (p64) | |
2751 | tok64->size = ocnt - cnt; | |
2752 | else | |
2753 | tok32->size = ocnt - cnt; | |
2754 | ||
2755 | _FREE(tokens, M_TEMP); | |
2756 | break; | |
2757 | } | |
2758 | ||
2759 | default: | |
2760 | VERIFY(0); | |
2761 | /* NOTREACHED */ | |
2762 | } | |
2763 | ||
2764 | return (error); | |
2765 | } | |
2766 | ||
2767 | static void | |
2768 | pf_expire_states_and_src_nodes(struct pf_rule *rule) | |
2769 | { | |
2770 | struct pf_state *state; | |
2771 | struct pf_src_node *sn; | |
2772 | int killed = 0; | |
2773 | ||
2774 | /* expire the states */ | |
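	/* (PFTM_PURGE marks them for immediate removal by the purge pass below) */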
2775 | state = TAILQ_FIRST(&state_list); | |
2776 | while (state) { | |
2777 | if (state->rule.ptr == rule) | |
2778 | state->timeout = PFTM_PURGE; | |
2779 | state = TAILQ_NEXT(state, entry_list); | |
2780 | } | |
2781 | pf_purge_expired_states(pf_status.states); | |
2782 | ||
2783 | /* expire the src_nodes */ | |
2784 | RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { | |
2785 | if (sn->rule.ptr != rule) | |
2786 | continue; | |
2787 | if (sn->states != 0) { | |
2788 | RB_FOREACH(state, pf_state_tree_id, | |
2789 | &tree_id) { | |
2790 | if (state->src_node == sn) | |
2791 | state->src_node = NULL; | |
2792 | if (state->nat_src_node == sn) | |
2793 | state->nat_src_node = NULL; | |
2794 | } | |
2795 | sn->states = 0; | |
2796 | } | |
2797 | sn->expire = 1; | |
2798 | killed++; | |
2799 | } | |
2800 | if (killed) | |
2801 | pf_purge_expired_src_nodes(); | |
2802 | } | |
2803 | ||
2804 | static void | |
2805 | pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num, | |
2806 | struct pf_rule *rule) | |
2807 | { | |
2808 | struct pf_rule *r; | |
2809 | int nr = 0; | |
2810 | ||
2811 | pf_expire_states_and_src_nodes(rule); | |
2812 | ||
2813 | pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule); | |
2814 | if (ruleset->rules[rs_num].active.rcount-- == 0) | |
2815 | panic("%s: rcount value broken!", __func__); | |
2816 | r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
2817 | ||
2818 | while (r) { | |
2819 | r->nr = nr++; | |
2820 | r = TAILQ_NEXT(r, entries); | |
2821 | } | |
2822 | } | |
2823 | ||
2824 | ||
2825 | static void | |
2826 | pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs) | |
2827 | { | |
2828 | pf_calc_skip_steps(ruleset->rules[rs].active.ptr); | |
2829 | ruleset->rules[rs].active.ticket = | |
2830 | ++ruleset->rules[rs].inactive.ticket; | |
2831 | } | |
2832 | ||
2833 | /* | |
2834 | * req_dev encodes the PF interface. Currently, possible values are | |
2835 | * 0 or PFRULE_PFM | |
2836 | */ | |
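/*
 * Since req_dev is either 0 or PFRULE_PFM, the recurring test
 * "(rule->rule_flag & PFRULE_PFM) ^ req_dev" is non-zero exactly when
 * the rule's origin and the requesting device disagree:
 *
 *	rule PFM?   req_dev       XOR    meaning
 *	   no          0           0     both plain pfctl    -> proceed
 *	   yes      PFRULE_PFM     0     both PFM interface  -> proceed
 *	   otherwise             != 0    mismatch            -> skip / EACCES
 */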
2837 | static int | |
2838 | pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev) | |
2839 | { | |
2840 | struct pf_ruleset *ruleset; | |
2841 | struct pf_rule *rule = NULL; | |
2842 | int is_anchor; | |
2843 | int error; | |
2844 | int i; | |
2845 | ||
2846 | is_anchor = (pr->anchor_call[0] != '\0'); | |
2847 | if ((ruleset = pf_find_ruleset_with_owner(pr->anchor, | |
2848 | pr->rule.owner, is_anchor, &error)) == NULL) | |
2849 | return (error); | |
2850 | ||
2851 | for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) { | |
2852 | rule = TAILQ_FIRST(ruleset->rules[i].active.ptr); | |
2853 | while (rule && (rule->ticket != pr->rule.ticket)) | |
2854 | rule = TAILQ_NEXT(rule, entries); | |
2855 | } | |
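	/*
	 * The for loop above post-increments i once more after the match,
	 * so i is wound back below to the ruleset the rule was found in.
	 */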
2856 | if (rule == NULL) | |
2857 | return (ENOENT); | |
2858 | else | |
2859 | i--; | |
2860 | ||
2861 | if (strcmp(rule->owner, pr->rule.owner)) | |
2862 | return (EACCES); | |
2863 | ||
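	/*
	 * When the rule being deleted is the last one in an unowned,
	 * non-main anchor, the block below effectively cascades upward:
	 * rule/ruleset are re-pointed at the parent, the emptied anchor's
	 * call rule is deleted there as well, and the code re-enters via
	 * the delete_rule label until no further empty anchors remain.
	 */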
2864 | delete_rule: | |
2865 | if (rule->anchor && (ruleset != &pf_main_ruleset) && | |
2866 | ((strcmp(ruleset->anchor->owner, "")) == 0) && | |
2867 | ((ruleset->rules[i].active.rcount - 1) == 0)) { | |
2868 | /* set rule & ruleset to parent and repeat */ | |
2869 | struct pf_rule *delete_rule = rule; | |
2870 | struct pf_ruleset *delete_ruleset = ruleset; | |
2871 | ||
2872 | #define parent_ruleset ruleset->anchor->parent->ruleset | |
2873 | if (ruleset->anchor->parent == NULL) | |
2874 | ruleset = &pf_main_ruleset; | |
2875 | else | |
2876 | ruleset = &parent_ruleset; | |
2877 | ||
2878 | rule = TAILQ_FIRST(ruleset->rules[i].active.ptr); | |
2879 | while (rule && | |
2880 | (rule->anchor != delete_ruleset->anchor)) | |
2881 | rule = TAILQ_NEXT(rule, entries); | |
2882 | if (rule == NULL) | |
2883 | panic("%s: rule not found!", __func__); | |
2884 | ||
2885 | /* | |
2886 | * if request device != rule's device, bail: | |
2887 | * with error if ticket matches; | |
2888 | * without error if ticket doesn't match (i.e. it's just cleanup) | |
2889 | */ | |
2890 | if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) { | |
2891 | if (rule->ticket != pr->rule.ticket) { | |
2892 | return (0); | |
2893 | } else { | |
2894 | return (EACCES); | |
2895 | } | |
2896 | } | |
2897 | ||
2898 | if (delete_rule->rule_flag & PFRULE_PFM) { | |
2899 | pffwrules--; | |
2900 | } | |
2901 | ||
2902 | pf_delete_rule_from_ruleset(delete_ruleset, | |
2903 | i, delete_rule); | |
2904 | delete_ruleset->rules[i].active.ticket = | |
2905 | ++delete_ruleset->rules[i].inactive.ticket; | |
2906 | goto delete_rule; | |
2907 | } else { | |
2908 | /* | |
2909 | * delete the rule only if the device that added it | |
2910 | * matches the device that issued the request | |
2911 | */ | |
2912 | if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) | |
2913 | return (EACCES); | |
2914 | if (rule->rule_flag & PFRULE_PFM) | |
2915 | pffwrules--; | |
2916 | pf_delete_rule_from_ruleset(ruleset, i, | |
2917 | rule); | |
2918 | pf_ruleset_cleanup(ruleset, i); | |
2919 | } | |
2920 | ||
2921 | return (0); | |
2922 | } | |
2923 | ||
2924 | /* | |
2925 | * req_dev encodes the PF interface. Currently, possible values are | |
2926 | * 0 or PFRULE_PFM | |
2927 | */ | |
2928 | static void | |
2929 | pf_delete_rule_by_owner(char *owner, u_int32_t req_dev) | |
2930 | { | |
2931 | struct pf_ruleset *ruleset; | |
2932 | struct pf_rule *rule, *next; | |
2933 | int deleted = 0; | |
2934 | ||
2935 | for (int rs = 0; rs < PF_RULESET_MAX; rs++) { | |
2936 | rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr); | |
2937 | ruleset = &pf_main_ruleset; | |
2938 | while (rule) { | |
2939 | next = TAILQ_NEXT(rule, entries); | |
2940 | /* | |
2941 | * delete the rule only if the device that added it | |
2942 | * matches the device that issued the request | |
2943 | */ | |
2944 | if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) { | |
2945 | rule = next; | |
2946 | continue; | |
2947 | } | |
2948 | if (rule->anchor) { | |
2949 | if (((strcmp(rule->owner, owner)) == 0) || | |
2950 | ((strcmp(rule->owner, "")) == 0)) { | |
2951 | if (rule->anchor->ruleset.rules[rs].active.rcount > 0) { | |
2952 | if (deleted) { | |
2953 | pf_ruleset_cleanup(ruleset, rs); | |
2954 | deleted = 0; | |
2955 | } | |
2956 | /* step into anchor */ | |
2957 | ruleset = | |
2958 | &rule->anchor->ruleset; | |
2959 | rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr); | |
2960 | continue; | |
2961 | } else { | |
2962 | if (rule->rule_flag & | |
2963 | PFRULE_PFM) | |
2964 | pffwrules--; | |
2965 | pf_delete_rule_from_ruleset(ruleset, rs, rule); | |
2966 | deleted = 1; | |
2967 | rule = next; | |
2968 | } | |
2969 | } else | |
2970 | rule = next; | |
2971 | } else { | |
2972 | if (((strcmp(rule->owner, owner)) == 0)) { | |
2973 | /* delete rule */ | |
2974 | if (rule->rule_flag & PFRULE_PFM) | |
2975 | pffwrules--; | |
2976 | pf_delete_rule_from_ruleset(ruleset, | |
2977 | rs, rule); | |
2978 | deleted = 1; | |
2979 | } | |
2980 | rule = next; | |
2981 | } | |
2982 | if (rule == NULL) { | |
2983 | if (deleted) { | |
2984 | pf_ruleset_cleanup(ruleset, rs); | |
2985 | deleted = 0; | |
2986 | } | |
2987 | if (ruleset != &pf_main_ruleset) | |
2988 | pf_deleterule_anchor_step_out(&ruleset, | |
2989 | rs, &rule); | |
2990 | } | |
2991 | } | |
2992 | } | |
2993 | } | |
2994 | ||
2995 | static void | |
2996 | pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr, | |
2997 | int rs, struct pf_rule **rule_ptr) | |
2998 | { | |
2999 | struct pf_ruleset *ruleset = *ruleset_ptr; | |
3000 | struct pf_rule *rule = *rule_ptr; | |
3001 | ||
3002 | /* step out of anchor */ | |
3003 | struct pf_ruleset *rs_copy = ruleset; | |
3004 | ruleset = ruleset->anchor->parent ? | |
3005 | &ruleset->anchor->parent->ruleset : &pf_main_ruleset; | |
3006 | ||
3007 | rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr); | |
3008 | while (rule && (rule->anchor != rs_copy->anchor)) | |
3009 | rule = TAILQ_NEXT(rule, entries); | |
3010 | if (rule == NULL) | |
3011 | panic("%s: parent rule of anchor not found!", __func__); | |
3012 | if (rule->anchor->ruleset.rules[rs].active.rcount > 0) | |
3013 | rule = TAILQ_NEXT(rule, entries); | |
3014 | ||
3015 | *ruleset_ptr = ruleset; | |
3016 | *rule_ptr = rule; | |
3017 | } | |
3018 | ||
3019 | static void | |
3020 | pf_addrwrap_setup(struct pf_addr_wrap *aw) | |
3021 | { | |
3022 | VERIFY(aw); | |
3023 | bzero(&aw->p, sizeof aw->p); | |
3024 | } | |
3025 | ||
3026 | static int | |
3027 | pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule, | |
3028 | struct pf_ruleset *ruleset) { | |
3029 | struct pf_pooladdr *apa; | |
3030 | int error = 0; | |
3031 | ||
3032 | if (rule->ifname[0]) { | |
3033 | rule->kif = pfi_kif_get(rule->ifname); | |
3034 | if (rule->kif == NULL) { | |
3035 | pool_put(&pf_rule_pl, rule); | |
3036 | return (EINVAL); | |
3037 | } | |
3038 | pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); | |
3039 | } | |
3040 | #if PF_ALTQ | |
3041 | /* set queue IDs */ | |
3042 | if (altq_allowed && rule->qname[0] != '\0') { | |
3043 | if ((rule->qid = pf_qname2qid(rule->qname)) == 0) | |
3044 | error = EBUSY; | |
3045 | else if (rule->pqname[0] != '\0') { | |
3046 | if ((rule->pqid = | |
3047 | pf_qname2qid(rule->pqname)) == 0) | |
3048 | error = EBUSY; | |
3049 | } else | |
3050 | rule->pqid = rule->qid; | |
3051 | } | |
3052 | #endif /* PF_ALTQ */ | |
3053 | if (rule->tagname[0]) | |
3054 | if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) | |
3055 | error = EBUSY; | |
3056 | if (rule->match_tagname[0]) | |
3057 | if ((rule->match_tag = | |
3058 | pf_tagname2tag(rule->match_tagname)) == 0) | |
3059 | error = EBUSY; | |
3060 | if (rule->rt && !rule->direction) | |
3061 | error = EINVAL; | |
3062 | #if PFLOG | |
3063 | if (!rule->log) | |
3064 | rule->logif = 0; | |
3065 | if (rule->logif >= PFLOGIFS_MAX) | |
3066 | error = EINVAL; | |
3067 | #endif /* PFLOG */ | |
3068 | pf_addrwrap_setup(&rule->src.addr); | |
3069 | pf_addrwrap_setup(&rule->dst.addr); | |
3070 | if (pf_rtlabel_add(&rule->src.addr) || | |
3071 | pf_rtlabel_add(&rule->dst.addr)) | |
3072 | error = EBUSY; | |
3073 | if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) | |
3074 | error = EINVAL; | |
3075 | if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) | |
3076 | error = EINVAL; | |
3077 | if (pf_tbladdr_setup(ruleset, &rule->src.addr)) | |
3078 | error = EINVAL; | |
3079 | if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) | |
3080 | error = EINVAL; | |
3081 | if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) | |
3082 | error = EINVAL; | |
3083 | TAILQ_FOREACH(apa, &pf_pabuf, entries) | |
3084 | if (pf_tbladdr_setup(ruleset, &apa->addr)) | |
3085 | error = EINVAL; | |
3086 | ||
3087 | if (rule->overload_tblname[0]) { | |
3088 | if ((rule->overload_tbl = pfr_attach_table(ruleset, | |
3089 | rule->overload_tblname)) == NULL) | |
3090 | error = EINVAL; | |
3091 | else | |
3092 | rule->overload_tbl->pfrkt_flags |= | |
3093 | PFR_TFLAG_ACTIVE; | |
3094 | } | |
3095 | ||
3096 | pf_mv_pool(&pf_pabuf, &rule->rpool.list); | |
3097 | ||
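	/*
	 * Translation rules (nat/rdr/binat/nat64) that are not pure anchor
	 * calls, and route-to style rules (rt > PF_FASTROUTE), need at
	 * least one address in the pool that was just moved over.
	 */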
3098 | if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || | |
3099 | (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) && | |
3100 | rule->anchor == NULL) || | |
3101 | (rule->rt > PF_FASTROUTE)) && | |
3102 | (TAILQ_FIRST(&rule->rpool.list) == NULL)) | |
3103 | error = EINVAL; | |
3104 | ||
3105 | if (error) { | |
3106 | pf_rm_rule(NULL, rule); | |
3107 | return (error); | |
3108 | } | |
3109 | /* For a NAT64 rule the rule's address family is AF_INET6 whereas | |
3110 | * the address pool's family will be AF_INET | |
3111 | */ | |
3112 | rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET : rule->af; | |
3113 | rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); | |
3114 | rule->evaluations = rule->packets[0] = rule->packets[1] = | |
3115 | rule->bytes[0] = rule->bytes[1] = 0; | |
3116 | ||
3117 | return (0); | |
3118 | } | |
3119 | ||
3120 | static int | |
3121 | pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p) | |
3122 | { | |
3123 | int error = 0; | |
3124 | u_int32_t req_dev = 0; | |
3125 | ||
3126 | switch (cmd) { | |
3127 | case DIOCADDRULE: { | |
3128 | struct pf_ruleset *ruleset; | |
3129 | struct pf_rule *rule, *tail; | |
3130 | int rs_num; | |
3131 | ||
3132 | pr->anchor[sizeof (pr->anchor) - 1] = '\0'; | |
3133 | pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; | |
3134 | ruleset = pf_find_ruleset(pr->anchor); | |
3135 | if (ruleset == NULL) { | |
3136 | error = EINVAL; | |
3137 | break; | |
3138 | } | |
3139 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
3140 | if (rs_num >= PF_RULESET_MAX) { | |
3141 | error = EINVAL; | |
3142 | break; | |
3143 | } | |
3144 | if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
3145 | error = EINVAL; | |
3146 | break; | |
3147 | } | |
3148 | if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { | |
3149 | error = EBUSY; | |
3150 | break; | |
3151 | } | |
3152 | if (pr->pool_ticket != ticket_pabuf) { | |
3153 | error = EBUSY; | |
3154 | break; | |
3155 | } | |
3156 | rule = pool_get(&pf_rule_pl, PR_WAITOK); | |
3157 | if (rule == NULL) { | |
3158 | error = ENOMEM; | |
3159 | break; | |
3160 | } | |
3161 | pf_rule_copyin(&pr->rule, rule, p, minordev); | |
3162 | #if !INET | |
3163 | if (rule->af == AF_INET) { | |
3164 | pool_put(&pf_rule_pl, rule); | |
3165 | error = EAFNOSUPPORT; | |
3166 | break; | |
3167 | } | |
3168 | #endif /* INET */ | |
3169 | #if !INET6 | |
3170 | if (rule->af == AF_INET6) { | |
3171 | pool_put(&pf_rule_pl, rule); | |
3172 | error = EAFNOSUPPORT; | |
3173 | break; | |
3174 | } | |
3175 | #endif /* INET6 */ | |
3176 | tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, | |
3177 | pf_rulequeue); | |
3178 | if (tail) | |
3179 | rule->nr = tail->nr + 1; | |
3180 | else | |
3181 | rule->nr = 0; | |
3182 | ||
3183 | if ((error = pf_rule_setup(pr, rule, ruleset))) | |
3184 | break; | |
3185 | ||
3186 | TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, | |
3187 | rule, entries); | |
3188 | ruleset->rules[rs_num].inactive.rcount++; | |
3189 | if (rule->rule_flag & PFRULE_PFM) | |
3190 | pffwrules++; | |
3191 | ||
3192 | if (rule->action == PF_NAT64) | |
3193 | atomic_add_16(&pf_nat64_configured, 1); | |
3194 | break; | |
3195 | } | |
3196 | ||
3197 | case DIOCGETRULES: { | |
3198 | struct pf_ruleset *ruleset; | |
3199 | struct pf_rule *tail; | |
3200 | int rs_num; | |
3201 | ||
3202 | pr->anchor[sizeof (pr->anchor) - 1] = '\0'; | |
3203 | pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; | |
3204 | ruleset = pf_find_ruleset(pr->anchor); | |
3205 | if (ruleset == NULL) { | |
3206 | error = EINVAL; | |
3207 | break; | |
3208 | } | |
3209 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
3210 | if (rs_num >= PF_RULESET_MAX) { | |
3211 | error = EINVAL; | |
3212 | break; | |
3213 | } | |
3214 | tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, | |
3215 | pf_rulequeue); | |
3216 | if (tail) | |
3217 | pr->nr = tail->nr + 1; | |
3218 | else | |
3219 | pr->nr = 0; | |
3220 | pr->ticket = ruleset->rules[rs_num].active.ticket; | |
3221 | break; | |
3222 | } | |
3223 | ||
3224 | case DIOCGETRULE: { | |
3225 | struct pf_ruleset *ruleset; | |
3226 | struct pf_rule *rule; | |
3227 | int rs_num, i; | |
3228 | ||
3229 | pr->anchor[sizeof (pr->anchor) - 1] = '\0'; | |
3230 | pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; | |
3231 | ruleset = pf_find_ruleset(pr->anchor); | |
3232 | if (ruleset == NULL) { | |
3233 | error = EINVAL; | |
3234 | break; | |
3235 | } | |
3236 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
3237 | if (rs_num >= PF_RULESET_MAX) { | |
3238 | error = EINVAL; | |
3239 | break; | |
3240 | } | |
3241 | if (pr->ticket != ruleset->rules[rs_num].active.ticket) { | |
3242 | error = EBUSY; | |
3243 | break; | |
3244 | } | |
3245 | rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
3246 | while ((rule != NULL) && (rule->nr != pr->nr)) | |
3247 | rule = TAILQ_NEXT(rule, entries); | |
3248 | if (rule == NULL) { | |
3249 | error = EBUSY; | |
3250 | break; | |
3251 | } | |
3252 | pf_rule_copyout(rule, &pr->rule); | |
3253 | if (pf_anchor_copyout(ruleset, rule, pr)) { | |
3254 | error = EBUSY; | |
3255 | break; | |
3256 | } | |
3257 | pfi_dynaddr_copyout(&pr->rule.src.addr); | |
3258 | pfi_dynaddr_copyout(&pr->rule.dst.addr); | |
3259 | pf_tbladdr_copyout(&pr->rule.src.addr); | |
3260 | pf_tbladdr_copyout(&pr->rule.dst.addr); | |
3261 | pf_rtlabel_copyout(&pr->rule.src.addr); | |
3262 | pf_rtlabel_copyout(&pr->rule.dst.addr); | |
3263 | for (i = 0; i < PF_SKIP_COUNT; ++i) | |
3264 | if (rule->skip[i].ptr == NULL) | |
3265 | pr->rule.skip[i].nr = -1; | |
3266 | else | |
3267 | pr->rule.skip[i].nr = | |
3268 | rule->skip[i].ptr->nr; | |
3269 | ||
3270 | if (pr->action == PF_GET_CLR_CNTR) { | |
3271 | rule->evaluations = 0; | |
3272 | rule->packets[0] = rule->packets[1] = 0; | |
3273 | rule->bytes[0] = rule->bytes[1] = 0; | |
3274 | } | |
3275 | break; | |
3276 | } | |
3277 | ||
3278 | case DIOCCHANGERULE: { | |
3279 | struct pfioc_rule *pcr = pr; | |
3280 | struct pf_ruleset *ruleset; | |
3281 | struct pf_rule *oldrule = NULL, *newrule = NULL; | |
3282 | struct pf_pooladdr *pa; | |
3283 | u_int32_t nr = 0; | |
3284 | int rs_num; | |
3285 | ||
3286 | if (!(pcr->action == PF_CHANGE_REMOVE || | |
3287 | pcr->action == PF_CHANGE_GET_TICKET) && | |
3288 | pcr->pool_ticket != ticket_pabuf) { | |
3289 | error = EBUSY; | |
3290 | break; | |
3291 | } | |
3292 | ||
3293 | if (pcr->action < PF_CHANGE_ADD_HEAD || | |
3294 | pcr->action > PF_CHANGE_GET_TICKET) { | |
3295 | error = EINVAL; | |
3296 | break; | |
3297 | } | |
3298 | pcr->anchor[sizeof (pcr->anchor) - 1] = '\0'; | |
3299 | pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0'; | |
3300 | ruleset = pf_find_ruleset(pcr->anchor); | |
3301 | if (ruleset == NULL) { | |
3302 | error = EINVAL; | |
3303 | break; | |
3304 | } | |
3305 | rs_num = pf_get_ruleset_number(pcr->rule.action); | |
3306 | if (rs_num >= PF_RULESET_MAX) { | |
3307 | error = EINVAL; | |
3308 | break; | |
3309 | } | |
3310 | ||
3311 | if (pcr->action == PF_CHANGE_GET_TICKET) { | |
3312 | pcr->ticket = ++ruleset->rules[rs_num].active.ticket; | |
3313 | break; | |
3314 | } else { | |
3315 | if (pcr->ticket != | |
3316 | ruleset->rules[rs_num].active.ticket) { | |
3317 | error = EINVAL; | |
3318 | break; | |
3319 | } | |
3320 | if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
3321 | error = EINVAL; | |
3322 | break; | |
3323 | } | |
3324 | } | |
3325 | ||
3326 | if (pcr->action != PF_CHANGE_REMOVE) { | |
3327 | newrule = pool_get(&pf_rule_pl, PR_WAITOK); | |
3328 | if (newrule == NULL) { | |
3329 | error = ENOMEM; | |
3330 | break; | |
3331 | } | |
3332 | pf_rule_copyin(&pcr->rule, newrule, p, minordev); | |
3333 | #if !INET | |
3334 | if (newrule->af == AF_INET) { | |
3335 | pool_put(&pf_rule_pl, newrule); | |
3336 | error = EAFNOSUPPORT; | |
3337 | break; | |
3338 | } | |
3339 | #endif /* INET */ | |
3340 | #if !INET6 | |
3341 | if (newrule->af == AF_INET6) { | |
3342 | pool_put(&pf_rule_pl, newrule); | |
3343 | error = EAFNOSUPPORT; | |
3344 | break; | |
3345 | } | |
3346 | #endif /* INET6 */ | |
3347 | if (newrule->ifname[0]) { | |
3348 | newrule->kif = pfi_kif_get(newrule->ifname); | |
3349 | if (newrule->kif == NULL) { | |
3350 | pool_put(&pf_rule_pl, newrule); | |
3351 | error = EINVAL; | |
3352 | break; | |
3353 | } | |
3354 | pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); | |
3355 | } else | |
3356 | newrule->kif = NULL; | |
3357 | ||
3358 | #if PF_ALTQ | |
3359 | /* set queue IDs */ | |
3360 | if (altq_allowed && newrule->qname[0] != '\0') { | |
3361 | if ((newrule->qid = | |
3362 | pf_qname2qid(newrule->qname)) == 0) | |
3363 | error = EBUSY; | |
3364 | else if (newrule->pqname[0] != '\0') { | |
3365 | if ((newrule->pqid = | |
3366 | pf_qname2qid(newrule->pqname)) == 0) | |
3367 | error = EBUSY; | |
3368 | } else | |
3369 | newrule->pqid = newrule->qid; | |
3370 | } | |
3371 | #endif /* PF_ALTQ */ | |
3372 | if (newrule->tagname[0]) | |
3373 | if ((newrule->tag = | |
3374 | pf_tagname2tag(newrule->tagname)) == 0) | |
3375 | error = EBUSY; | |
3376 | if (newrule->match_tagname[0]) | |
3377 | if ((newrule->match_tag = pf_tagname2tag( | |
3378 | newrule->match_tagname)) == 0) | |
3379 | error = EBUSY; | |
3380 | if (newrule->rt && !newrule->direction) | |
3381 | error = EINVAL; | |
3382 | #if PFLOG | |
3383 | if (!newrule->log) | |
3384 | newrule->logif = 0; | |
3385 | if (newrule->logif >= PFLOGIFS_MAX) | |
3386 | error = EINVAL; | |
3387 | #endif /* PFLOG */ | |
3388 | pf_addrwrap_setup(&newrule->src.addr); | |
3389 | pf_addrwrap_setup(&newrule->dst.addr); | |
3390 | if (pf_rtlabel_add(&newrule->src.addr) || | |
3391 | pf_rtlabel_add(&newrule->dst.addr)) | |
3392 | error = EBUSY; | |
3393 | if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) | |
3394 | error = EINVAL; | |
3395 | if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) | |
3396 | error = EINVAL; | |
3397 | if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) | |
3398 | error = EINVAL; | |
3399 | if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) | |
3400 | error = EINVAL; | |
3401 | if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) | |
3402 | error = EINVAL; | |
3403 | TAILQ_FOREACH(pa, &pf_pabuf, entries) | |
3404 | if (pf_tbladdr_setup(ruleset, &pa->addr)) | |
3405 | error = EINVAL; | |
3406 | ||
3407 | if (newrule->overload_tblname[0]) { | |
3408 | if ((newrule->overload_tbl = pfr_attach_table( | |
3409 | ruleset, newrule->overload_tblname)) == | |
3410 | NULL) | |
3411 | error = EINVAL; | |
3412 | else | |
3413 | newrule->overload_tbl->pfrkt_flags |= | |
3414 | PFR_TFLAG_ACTIVE; | |
3415 | } | |
3416 | ||
3417 | pf_mv_pool(&pf_pabuf, &newrule->rpool.list); | |
3418 | if (((((newrule->action == PF_NAT) || | |
3419 | (newrule->action == PF_RDR) || | |
3420 | (newrule->action == PF_BINAT) || | |
3421 | (newrule->rt > PF_FASTROUTE)) && | |
3422 | !newrule->anchor)) && | |
3423 | (TAILQ_FIRST(&newrule->rpool.list) == NULL)) | |
3424 | error = EINVAL; | |
3425 | ||
3426 | if (error) { | |
3427 | pf_rm_rule(NULL, newrule); | |
3428 | break; | |
3429 | } | |
3430 | newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); | |
3431 | newrule->evaluations = 0; | |
3432 | newrule->packets[0] = newrule->packets[1] = 0; | |
3433 | newrule->bytes[0] = newrule->bytes[1] = 0; | |
3434 | } | |
3435 | pf_empty_pool(&pf_pabuf); | |
3436 | ||
3437 | if (pcr->action == PF_CHANGE_ADD_HEAD) | |
3438 | oldrule = TAILQ_FIRST( | |
3439 | ruleset->rules[rs_num].active.ptr); | |
3440 | else if (pcr->action == PF_CHANGE_ADD_TAIL) | |
3441 | oldrule = TAILQ_LAST( | |
3442 | ruleset->rules[rs_num].active.ptr, pf_rulequeue); | |
3443 | else { | |
3444 | oldrule = TAILQ_FIRST( | |
3445 | ruleset->rules[rs_num].active.ptr); | |
3446 | while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) | |
3447 | oldrule = TAILQ_NEXT(oldrule, entries); | |
3448 | if (oldrule == NULL) { | |
3449 | if (newrule != NULL) | |
3450 | pf_rm_rule(NULL, newrule); | |
3451 | error = EINVAL; | |
3452 | break; | |
3453 | } | |
3454 | } | |
3455 | ||
3456 | if (pcr->action == PF_CHANGE_REMOVE) { | |
3457 | pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); | |
3458 | ruleset->rules[rs_num].active.rcount--; | |
3459 | } else { | |
3460 | if (oldrule == NULL) | |
3461 | TAILQ_INSERT_TAIL( | |
3462 | ruleset->rules[rs_num].active.ptr, | |
3463 | newrule, entries); | |
3464 | else if (pcr->action == PF_CHANGE_ADD_HEAD || | |
3465 | pcr->action == PF_CHANGE_ADD_BEFORE) | |
3466 | TAILQ_INSERT_BEFORE(oldrule, newrule, entries); | |
3467 | else | |
3468 | TAILQ_INSERT_AFTER( | |
3469 | ruleset->rules[rs_num].active.ptr, | |
3470 | oldrule, newrule, entries); | |
3471 | ruleset->rules[rs_num].active.rcount++; | |
3472 | } | |
3473 | ||
3474 | nr = 0; | |
3475 | TAILQ_FOREACH(oldrule, | |
3476 | ruleset->rules[rs_num].active.ptr, entries) | |
3477 | oldrule->nr = nr++; | |
3478 | ||
3479 | ruleset->rules[rs_num].active.ticket++; | |
3480 | ||
3481 | pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); | |
3482 | pf_remove_if_empty_ruleset(ruleset); | |
3483 | ||
3484 | break; | |
3485 | } | |
3486 | ||
3487 | case DIOCINSERTRULE: { | |
3488 | struct pf_ruleset *ruleset; | |
3489 | struct pf_rule *rule, *tail, *r; | |
3490 | int rs_num; | |
3491 | int is_anchor; | |
3492 | ||
3493 | pr->anchor[sizeof (pr->anchor) - 1] = '\0'; | |
3494 | pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; | |
3495 | is_anchor = (pr->anchor_call[0] != '\0'); | |
3496 | ||
3497 | if ((ruleset = pf_find_ruleset_with_owner(pr->anchor, | |
3498 | pr->rule.owner, is_anchor, &error)) == NULL) | |
3499 | break; | |
3500 | ||
3501 | rs_num = pf_get_ruleset_number(pr->rule.action); | |
3502 | if (rs_num >= PF_RULESET_MAX) { | |
3503 | error = EINVAL; | |
3504 | break; | |
3505 | } | |
3506 | if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
3507 | error = EINVAL; | |
3508 | break; | |
3509 | } | |
3510 | ||
3511 | /* make sure this anchor rule doesn't exist already */ | |
3512 | if (is_anchor) { | |
3513 | r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
3514 | while (r) { | |
3515 | if (r->anchor && | |
3516 | ((strcmp(r->anchor->name, | |
3517 | pr->anchor_call)) == 0)) { | |
3518 | if (((strcmp(pr->rule.owner, | |
3519 | r->owner)) == 0) || | |
3520 | ((strcmp(r->owner, "")) == 0)) | |
3521 | error = EEXIST; | |
3522 | else | |
3523 | error = EPERM; | |
3524 | break; | |
3525 | } | |
3526 | r = TAILQ_NEXT(r, entries); | |
3527 | } | |
3528 | if (error != 0) | |
3529 | return (error); | |
3530 | } | |
3531 | ||
3532 | rule = pool_get(&pf_rule_pl, PR_WAITOK); | |
3533 | if (rule == NULL) { | |
3534 | error = ENOMEM; | |
3535 | break; | |
3536 | } | |
3537 | pf_rule_copyin(&pr->rule, rule, p, minordev); | |
3538 | #if !INET | |
3539 | if (rule->af == AF_INET) { | |
3540 | pool_put(&pf_rule_pl, rule); | |
3541 | error = EAFNOSUPPORT; | |
3542 | break; | |
3543 | } | |
3544 | #endif /* INET */ | |
3545 | #if !INET6 | |
3546 | if (rule->af == AF_INET6) { | |
3547 | pool_put(&pf_rule_pl, rule); | |
3548 | error = EAFNOSUPPORT; | |
3549 | break; | |
3550 | } | |
3552 | #endif /* INET6 */ | |
3553 | r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); | |
3554 | while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) | |
3555 | r = TAILQ_NEXT(r, entries); | |
3556 | if (r == NULL) { | |
3557 | if ((tail = | |
3558 | TAILQ_LAST(ruleset->rules[rs_num].active.ptr, | |
3559 | pf_rulequeue)) != NULL) | |
3560 | rule->nr = tail->nr + 1; | |
3561 | else | |
3562 | rule->nr = 0; | |
3563 | } else { | |
3564 | rule->nr = r->nr; | |
3565 | } | |
3566 | ||
3567 | if ((error = pf_rule_setup(pr, rule, ruleset))) | |
3568 | break; | |
3569 | ||
3570 | if (rule->anchor != NULL) | |
3571 | strlcpy(rule->anchor->owner, rule->owner, | |
3572 | PF_OWNER_NAME_SIZE); | |
3573 | ||
3574 | if (r) { | |
3575 | TAILQ_INSERT_BEFORE(r, rule, entries); | |
3576 | while (r && ++r->nr) | |
3577 | r = TAILQ_NEXT(r, entries); | |
3578 | } else | |
3579 | TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr, | |
3580 | rule, entries); | |
3581 | ruleset->rules[rs_num].active.rcount++; | |
3582 | ||
3583 | /* Calculate checksum for the main ruleset */ | |
3584 | if (ruleset == &pf_main_ruleset) | |
3585 | error = pf_setup_pfsync_matching(ruleset); | |
3586 | ||
3587 | pf_ruleset_cleanup(ruleset, rs_num); | |
3588 | rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule); | |
3589 | ||
3590 | pr->rule.ticket = rule->ticket; | |
3591 | pf_rule_copyout(rule, &pr->rule); | |
3592 | if (rule->rule_flag & PFRULE_PFM) | |
3593 | pffwrules++; | |
3594 | if (rule->action == PF_NAT64) | |
3595 | atomic_add_16(&pf_nat64_configured, 1); | |
3596 | break; | |
3597 | } | |
3598 | ||
3599 | case DIOCDELETERULE: { | |
3600 | pr->anchor[sizeof (pr->anchor) - 1] = '\0'; | |
3601 | pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; | |
3602 | ||
3603 | if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { | |
3604 | error = EINVAL; | |
3605 | break; | |
3606 | } | |
3607 | ||
3608 | /* get device through which request is made */ | |
3609 | if ((uint8_t)minordev == PFDEV_PFM) | |
3610 | req_dev |= PFRULE_PFM; | |
3611 | ||
3612 | if (pr->rule.ticket) { | |
3613 | if ((error = pf_delete_rule_by_ticket(pr, req_dev))) | |
3614 | break; | |
3615 | } else | |
3616 | pf_delete_rule_by_owner(pr->rule.owner, req_dev); | |
3617 | pr->nr = pffwrules; | |
3618 | if (pr->rule.action == PF_NAT64) | |
3619 | atomic_add_16(&pf_nat64_configured, -1); | |
3620 | break; | |
3621 | } | |
3622 | ||
3623 | default: | |
3624 | VERIFY(0); | |
3625 | /* NOTREACHED */ | |
3626 | } | |
3627 | ||
3628 | return (error); | |
3629 | } | |
3630 | ||
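The DIOCGETRULES / DIOCGETRULE pair above follows the usual two-step enumeration: GETRULES returns a ticket plus the number of active rules for the chosen ruleset, and GETRULE is then issued once per rule number under that ticket. A minimal userland sketch of that loop, assuming the usual /dev/pf device node and a build that can include <net/pfvar.h> (neither is shown in this file); the helper name is hypothetical.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: walk every rule in the main filter ruleset. */
static int
dump_filter_rules(void)
{
	struct pfioc_rule pr;
	u_int32_t i, total;
	int dev, error = 0;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		return (-1);

	memset(&pr, 0, sizeof (pr));
	pr.rule.action = PF_PASS;		/* selects the filter ruleset */
	if (ioctl(dev, DIOCGETRULES, &pr) == -1) {
		close(dev);
		return (-1);
	}
	total = pr.nr;				/* kernel also filled pr.ticket */
	for (i = 0; i < total; i++) {
		pr.nr = i;
		if (ioctl(dev, DIOCGETRULE, &pr) == -1) {
			error = -1;
			break;
		}
		printf("rule %u: %llu evaluations\n", i,
		    (unsigned long long)pr.rule.evaluations);
	}
	close(dev);
	return (error);
}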
3631 | static int | |
3632 | pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) | |
3633 | { | |
3634 | #pragma unused(p) | |
3635 | int error = 0; | |
3636 | ||
3637 | psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0'; | |
3638 | psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0'; | |
3639 | ||
3640 | bool ifname_matched = true; | |
3641 | bool owner_matched = true; | |
3642 | ||
3643 | switch (cmd) { | |
3644 | case DIOCCLRSTATES: { | |
3645 | struct pf_state *s, *nexts; | |
3646 | int killed = 0; | |
3647 | ||
3648 | for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { | |
3649 | nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); | |
3650 | /* | |
3651 | * Purge all states only when neither ifname | |
3652 | * nor owner is provided. If either is provided, | |
3653 | * purge only the states whose metadata matches. | |
3654 | */ | |
3655 | bool unlink_state = false; | |
3656 | ifname_matched = true; | |
3657 | owner_matched = true; | |
3658 | ||
3659 | if (psk->psk_ifname[0] && | |
3660 | strcmp(psk->psk_ifname, s->kif->pfik_name)) { | |
3661 | ifname_matched = false; | |
3662 | } | |
3663 | ||
3664 | if (psk->psk_ownername[0] && | |
3665 | ((NULL == s->rule.ptr) || | |
3666 | strcmp(psk->psk_ownername, s->rule.ptr->owner))) { | |
3667 | owner_matched = false; | |
3668 | } | |
3669 | ||
3670 | unlink_state = ifname_matched && owner_matched; | |
3671 | ||
3672 | if (unlink_state) { | |
3673 | #if NPFSYNC | |
3674 | /* don't send out individual delete messages */ | |
3675 | s->sync_flags = PFSTATE_NOSYNC; | |
3676 | #endif | |
3677 | pf_unlink_state(s); | |
3678 | killed++; | |
3679 | } | |
3680 | } | |
3681 | psk->psk_af = killed; | |
3682 | #if NPFSYNC | |
3683 | pfsync_clear_states(pf_status.hostid, psk->psk_ifname); | |
3684 | #endif | |
3685 | break; | |
3686 | } | |
3687 | ||
3688 | case DIOCKILLSTATES: { | |
3689 | struct pf_state *s, *nexts; | |
3690 | struct pf_state_key *sk; | |
3691 | struct pf_state_host *src, *dst; | |
3692 | int killed = 0; | |
3693 | ||
3694 | for (s = RB_MIN(pf_state_tree_id, &tree_id); s; | |
3695 | s = nexts) { | |
3696 | nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); | |
3697 | sk = s->state_key; | |
3698 | ifname_matched = true; | |
3699 | owner_matched = true; | |
3700 | ||
3701 | if (psk->psk_ifname[0] && | |
3702 | strcmp(psk->psk_ifname, s->kif->pfik_name)) { | |
3703 | ifname_matched = false; | |
3704 | } | |
3705 | ||
3706 | if (psk->psk_ownername[0] && | |
3707 | ((NULL == s->rule.ptr) || | |
3708 | strcmp(psk->psk_ownername, s->rule.ptr->owner))) { | |
3709 | owner_matched = false; | |
3710 | } | |
3711 | ||
3712 | if (sk->direction == PF_OUT) { | |
3713 | src = &sk->lan; | |
3714 | dst = &sk->ext_lan; | |
3715 | } else { | |
3716 | src = &sk->ext_lan; | |
3717 | dst = &sk->lan; | |
3718 | } | |
3719 | if ((!psk->psk_af || sk->af_lan == psk->psk_af) && | |
3720 | (!psk->psk_proto || psk->psk_proto == sk->proto) && | |
3721 | PF_MATCHA(psk->psk_src.neg, | |
3722 | &psk->psk_src.addr.v.a.addr, | |
3723 | &psk->psk_src.addr.v.a.mask, | |
3724 | &src->addr, sk->af_lan) && | |
3725 | PF_MATCHA(psk->psk_dst.neg, | |
3726 | &psk->psk_dst.addr.v.a.addr, | |
3727 | &psk->psk_dst.addr.v.a.mask, | |
3728 | &dst->addr, sk->af_lan) && | |
3729 | (pf_match_xport(psk->psk_proto, | |
3730 | psk->psk_proto_variant, &psk->psk_src.xport, | |
3731 | &src->xport)) && | |
3732 | (pf_match_xport(psk->psk_proto, | |
3733 | psk->psk_proto_variant, &psk->psk_dst.xport, | |
3734 | &dst->xport)) && | |
3735 | ifname_matched && | |
3736 | owner_matched) { | |
3737 | #if NPFSYNC | |
3738 | /* send immediate delete of state */ | |
3739 | pfsync_delete_state(s); | |
3740 | s->sync_flags |= PFSTATE_NOSYNC; | |
3741 | #endif | |
3742 | pf_unlink_state(s); | |
3743 | killed++; | |
3744 | } | |
3745 | } | |
3746 | psk->psk_af = killed; | |
3747 | break; | |
3748 | } | |
3749 | ||
3750 | default: | |
3751 | VERIFY(0); | |
3752 | /* NOTREACHED */ | |
3753 | } | |
3754 | ||
3755 | return (error); | |
3756 | } | |
3757 | ||
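DIOCCLRSTATES above purges every state unless psk_ifname and/or psk_ownername narrow the match, and it reports the number of purged states back through psk_af. A short userland sketch of flushing the states of one interface, assuming the usual /dev/pf device node; the helper name and interface name are placeholders.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: flush all states created on a given interface. */
static int
clear_states_on(const char *ifname)
{
	struct pfioc_state_kill psk;
	int dev;

	if ((dev = open("/dev/pf", O_RDWR)) == -1)
		return (-1);

	memset(&psk, 0, sizeof (psk));
	strlcpy(psk.psk_ifname, ifname, sizeof (psk.psk_ifname));

	if (ioctl(dev, DIOCCLRSTATES, &psk) == -1) {
		close(dev);
		return (-1);
	}
	/* the kernel reuses psk_af to report how many states were purged */
	printf("%u states cleared on %s\n", (unsigned)psk.psk_af, ifname);
	close(dev);
	return (0);
}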
3758 | static int | |
3759 | pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p) | |
3760 | { | |
3761 | #pragma unused(p) | |
3762 | int error = 0; | |
3763 | ||
3764 | switch (cmd) { | |
3765 | case DIOCADDSTATE: { | |
3766 | struct pfsync_state *sp = &ps->state; | |
3767 | struct pf_state *s; | |
3768 | struct pf_state_key *sk; | |
3769 | struct pfi_kif *kif; | |
3770 | ||
3771 | if (sp->timeout >= PFTM_MAX) { | |
3772 | error = EINVAL; | |
3773 | break; | |
3774 | } | |
3775 | s = pool_get(&pf_state_pl, PR_WAITOK); | |
3776 | if (s == NULL) { | |
3777 | error = ENOMEM; | |
3778 | break; | |
3779 | } | |
3780 | bzero(s, sizeof (struct pf_state)); | |
3781 | if ((sk = pf_alloc_state_key(s, NULL)) == NULL) { | |
3782 | pool_put(&pf_state_pl, s); | |
3783 | error = ENOMEM; | |
3784 | break; | |
3785 | } | |
3786 | pf_state_import(sp, sk, s); | |
3787 | kif = pfi_kif_get(sp->ifname); | |
3788 | if (kif == NULL) { | |
3789 | pool_put(&pf_state_pl, s); | |
3790 | pool_put(&pf_state_key_pl, sk); | |
3791 | error = ENOENT; | |
3792 | break; | |
3793 | } | |
3794 | TAILQ_INIT(&s->unlink_hooks); | |
3795 | s->state_key->app_state = 0; | |
3796 | if (pf_insert_state(kif, s)) { | |
3797 | pfi_kif_unref(kif, PFI_KIF_REF_NONE); | |
3798 | pool_put(&pf_state_pl, s); | |
3799 | error = EEXIST; | |
3800 | break; | |
3801 | } | |
3802 | pf_default_rule.states++; | |
3803 | VERIFY(pf_default_rule.states != 0); | |
3804 | break; | |
3805 | } | |
3806 | ||
3807 | case DIOCGETSTATE: { | |
3808 | struct pf_state *s; | |
3809 | struct pf_state_cmp id_key; | |
3810 | ||
3811 | bcopy(ps->state.id, &id_key.id, sizeof (id_key.id)); | |
3812 | id_key.creatorid = ps->state.creatorid; | |
3813 | ||
3814 | s = pf_find_state_byid(&id_key); | |
3815 | if (s == NULL) { | |
3816 | error = ENOENT; | |
3817 | break; | |
3818 | } | |
3819 | ||
3820 | pf_state_export(&ps->state, s->state_key, s); | |
3821 | break; | |
3822 | } | |
3823 | ||
3824 | default: | |
3825 | VERIFY(0); | |
3826 | /* NOTREACHED */ | |
3827 | } | |
3828 | ||
3829 | return (error); | |
3830 | } | |
3831 | ||
3832 | static int | |
3833 | pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32, | |
3834 | struct pfioc_states_64 *ps64, struct proc *p) | |
3835 | { | |
3836 | int p64 = proc_is64bit(p); | |
3837 | int error = 0; | |
3838 | ||
3839 | switch (cmd) { | |
3840 | case DIOCGETSTATES: { /* struct pfioc_states */ | |
3841 | struct pf_state *state; | |
3842 | struct pfsync_state *pstore; | |
3843 | user_addr_t buf; | |
3844 | u_int32_t nr = 0; | |
3845 | int len, size; | |
3846 | ||
3847 | len = (p64 ? ps64->ps_len : ps32->ps_len); | |
3848 | if (len == 0) { | |
3849 | size = sizeof (struct pfsync_state) * pf_status.states; | |
3850 | if (p64) | |
3851 | ps64->ps_len = size; | |
3852 | else | |
3853 | ps32->ps_len = size; | |
3854 | break; | |
3855 | } | |
3856 | ||
3857 | pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK); | |
3858 | if (pstore == NULL) { | |
3859 | error = ENOMEM; | |
3860 | break; | |
3861 | } | |
3862 | buf = (p64 ? ps64->ps_buf : ps32->ps_buf); | |
3863 | ||
3864 | state = TAILQ_FIRST(&state_list); | |
3865 | while (state) { | |
3866 | if (state->timeout != PFTM_UNLINKED) { | |
3867 | if ((nr + 1) * sizeof (*pstore) > (unsigned)len) | |
3868 | break; | |
3869 | ||
3870 | pf_state_export(pstore, | |
3871 | state->state_key, state); | |
3872 | error = copyout(pstore, buf, sizeof (*pstore)); | |
3873 | if (error) { | |
3874 | _FREE(pstore, M_TEMP); | |
3875 | goto fail; | |
3876 | } | |
3877 | buf += sizeof (*pstore); | |
3878 | nr++; | |
3879 | } | |
3880 | state = TAILQ_NEXT(state, entry_list); | |
3881 | } | |
3882 | ||
3883 | size = sizeof (struct pfsync_state) * nr; | |
3884 | if (p64) | |
3885 | ps64->ps_len = size; | |
3886 | else | |
3887 | ps32->ps_len = size; | |
3888 | ||
3889 | _FREE(pstore, M_TEMP); | |
3890 | break; | |
3891 | } | |
3892 | ||
3893 | default: | |
3894 | VERIFY(0); | |
3895 | /* NOTREACHED */ | |
3896 | } | |
3897 | fail: | |
3898 | return (error); | |
3899 | } | |
3900 | ||
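DIOCGETSTATES above implements a two-call sizing contract: a first call with ps_len == 0 only reports how much space the current state table needs, and a second call with a buffer of at least that size copies out one struct pfsync_state per state and rewrites ps_len to the number of bytes actually used. A userland sketch of that contract, assuming the usual /dev/pf device node and that the userland struct pfioc_states exposes ps_len / ps_buf the way the 32-/64-bit kernel copies above do.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: fetch all states; returns the count, -1 on error. */
static int
fetch_states(struct pfsync_state **out)
{
	struct pfioc_states ps;
	int dev, n = -1;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		return (-1);

	memset(&ps, 0, sizeof (ps));
	/* first call: ps_len == 0, so the kernel only reports the size */
	if (ioctl(dev, DIOCGETSTATES, &ps) == -1)
		goto done;
	if (ps.ps_len == 0) {
		n = 0;
		goto done;
	}
	if ((ps.ps_buf = malloc(ps.ps_len)) == NULL)
		goto done;
	/* second call: the kernel copies states out and updates ps_len */
	if (ioctl(dev, DIOCGETSTATES, &ps) == -1) {
		free(ps.ps_buf);
		goto done;
	}
	*out = (struct pfsync_state *)(void *)ps.ps_buf;
	n = ps.ps_len / sizeof (struct pfsync_state);
done:
	close(dev);
	return (n);
}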
3901 | static int | |
3902 | pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p) | |
3903 | { | |
3904 | #pragma unused(p) | |
3905 | int error = 0; | |
3906 | ||
3907 | switch (cmd) { | |
3908 | case DIOCNATLOOK: { | |
3909 | struct pf_state_key *sk; | |
3910 | struct pf_state *state; | |
3911 | struct pf_state_key_cmp key; | |
3912 | int m = 0, direction = pnl->direction; | |
3913 | ||
3914 | key.proto = pnl->proto; | |
3915 | key.proto_variant = pnl->proto_variant; | |
3916 | ||
3917 | if (!pnl->proto || | |
3918 | PF_AZERO(&pnl->saddr, pnl->af) || | |
3919 | PF_AZERO(&pnl->daddr, pnl->af) || | |
3920 | ((pnl->proto == IPPROTO_TCP || | |
3921 | pnl->proto == IPPROTO_UDP) && | |
3922 | (!pnl->dxport.port || !pnl->sxport.port))) | |
3923 | error = EINVAL; | |
3924 | else { | |
3925 | /* | |
3926 | * Userland gives us the source and dest of the | |
3927 | * connection; reverse the lookup so we ask what | |
3928 | * happens to the return traffic, which lets us find | |
3929 | * it in the state tree. | |
3930 | */ | |
3931 | if (direction == PF_IN) { | |
3932 | key.af_gwy = pnl->af; | |
3933 | PF_ACPY(&key.ext_gwy.addr, &pnl->daddr, | |
3934 | pnl->af); | |
3935 | memcpy(&key.ext_gwy.xport, &pnl->dxport, | |
3936 | sizeof (key.ext_gwy.xport)); | |
3937 | PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); | |
3938 | memcpy(&key.gwy.xport, &pnl->sxport, | |
3939 | sizeof (key.gwy.xport)); | |
3940 | state = pf_find_state_all(&key, PF_IN, &m); | |
3941 | } else { | |
3942 | key.af_lan = pnl->af; | |
3943 | PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); | |
3944 | memcpy(&key.lan.xport, &pnl->dxport, | |
3945 | sizeof (key.lan.xport)); | |
3946 | PF_ACPY(&key.ext_lan.addr, &pnl->saddr, | |
3947 | pnl->af); | |
3948 | memcpy(&key.ext_lan.xport, &pnl->sxport, | |
3949 | sizeof (key.ext_lan.xport)); | |
3950 | state = pf_find_state_all(&key, PF_OUT, &m); | |
3951 | } | |
3952 | if (m > 1) | |
3953 | error = E2BIG; /* more than one state */ | |
3954 | else if (state != NULL) { | |
3955 | sk = state->state_key; | |
3956 | if (direction == PF_IN) { | |
3957 | PF_ACPY(&pnl->rsaddr, &sk->lan.addr, | |
3958 | sk->af_lan); | |
3959 | memcpy(&pnl->rsxport, &sk->lan.xport, | |
3960 | sizeof (pnl->rsxport)); | |
3961 | PF_ACPY(&pnl->rdaddr, &pnl->daddr, | |
3962 | pnl->af); | |
3963 | memcpy(&pnl->rdxport, &pnl->dxport, | |
3964 | sizeof (pnl->rdxport)); | |
3965 | } else { | |
3966 | PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, | |
3967 | sk->af_gwy); | |
3968 | memcpy(&pnl->rdxport, &sk->gwy.xport, | |
3969 | sizeof (pnl->rdxport)); | |
3970 | PF_ACPY(&pnl->rsaddr, &pnl->saddr, | |
3971 | pnl->af); | |
3972 | memcpy(&pnl->rsxport, &pnl->sxport, | |
3973 | sizeof (pnl->rsxport)); | |
3974 | } | |
3975 | } else | |
3976 | error = ENOENT; | |
3977 | } | |
3978 | break; | |
3979 | } | |
3980 | ||
3981 | default: | |
3982 | VERIFY(0); | |
3983 | /* NOTREACHED */ | |
3984 | } | |
3985 | ||
3986 | return (error); | |
3987 | } | |
3988 | ||
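The DIOCNATLOOK handler above takes the connection endpoints as the caller sees them, reverses them into a state-tree lookup, and returns the addresses and ports after translation in the rsaddr/rdaddr and rsxport/rdxport fields. A userland sketch for a TCP flow, assuming the usual /dev/pf device node; the helper name, addresses and ports are placeholders.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: ask PF how an established TCP flow was translated. */
static int
natlook_tcp(void)
{
	struct pfioc_natlook pnl;
	char buf[INET_ADDRSTRLEN];
	int dev;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		return (-1);

	memset(&pnl, 0, sizeof (pnl));
	pnl.af = AF_INET;
	pnl.proto = IPPROTO_TCP;
	pnl.direction = PF_IN;
	/* a struct pf_addr keeps an IPv4 address in its first four bytes */
	inet_pton(AF_INET, "192.0.2.10", &pnl.saddr);
	inet_pton(AF_INET, "198.51.100.1", &pnl.daddr);
	pnl.sxport.port = htons(33000);
	pnl.dxport.port = htons(80);

	if (ioctl(dev, DIOCNATLOOK, &pnl) == -1) {
		close(dev);
		return (-1);		/* e.g. ENOENT: no matching state */
	}
	/* rsaddr/rsxport and rdaddr/rdxport hold the translated view */
	inet_ntop(AF_INET, &pnl.rsaddr, buf, sizeof (buf));
	printf("translated source %s:%u\n", buf, ntohs(pnl.rsxport.port));
	close(dev);
	return (0);
}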
3989 | static int | |
3990 | pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p) | |
3991 | { | |
3992 | #pragma unused(p) | |
3993 | int error = 0; | |
3994 | ||
3995 | switch (cmd) { | |
3996 | case DIOCSETTIMEOUT: { | |
3997 | int old; | |
3998 | ||
3999 | if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || | |
4000 | pt->seconds < 0) { | |
4001 | error = EINVAL; | |
4002 | goto fail; | |
4003 | } | |
4004 | old = pf_default_rule.timeout[pt->timeout]; | |
4005 | if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) | |
4006 | pt->seconds = 1; | |
4007 | pf_default_rule.timeout[pt->timeout] = pt->seconds; | |
4008 | if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) | |
4009 | wakeup(pf_purge_thread_fn); | |
4010 | pt->seconds = old; | |
4011 | break; | |
4012 | } | |
4013 | ||
4014 | case DIOCGETTIMEOUT: { | |
4015 | if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { | |
4016 | error = EINVAL; | |
4017 | goto fail; | |
4018 | } | |
4019 | pt->seconds = pf_default_rule.timeout[pt->timeout]; | |
4020 | break; | |
4021 | } | |
4022 | ||
4023 | default: | |
4024 | VERIFY(0); | |
4025 | /* NOTREACHED */ | |
4026 | } | |
4027 | fail: | |
4028 | return (error); | |
4029 | } | |
4030 | ||
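DIOCSETTIMEOUT above installs the new value and hands the previous one back in the seconds field, so a get-modify-set round trip needs no extra bookkeeping. A small userland sketch, assuming the usual /dev/pf device node; PFTM_TCP_ESTABLISHED stands in for any of the PFTM_* indices this handler accepts, and the helper name is hypothetical.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: double the TCP established-state timeout. */
static int
bump_established_timeout(void)
{
	struct pfioc_tm pt;
	int dev;

	if ((dev = open("/dev/pf", O_RDWR)) == -1)
		return (-1);

	memset(&pt, 0, sizeof (pt));
	pt.timeout = PFTM_TCP_ESTABLISHED;
	if (ioctl(dev, DIOCGETTIMEOUT, &pt) == -1)
		goto bad;

	pt.seconds *= 2;
	/* on return, pt.seconds carries the value that was just replaced */
	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
		goto bad;

	printf("previous timeout was %d seconds\n", pt.seconds);
	close(dev);
	return (0);
bad:
	close(dev);
	return (-1);
}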
4031 | static int | |
4032 | pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p) | |
4033 | { | |
4034 | #pragma unused(p) | |
4035 | int error = 0; | |
4036 | ||
4037 | switch (cmd) { | |
4038 | case DIOCGETLIMIT: { | |
4039 | ||
4040 | if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { | |
4041 | error = EINVAL; | |
4042 | goto fail; | |
4043 | } | |
4044 | pl->limit = pf_pool_limits[pl->index].limit; | |
4045 | break; | |
4046 | } | |
4047 | ||
4048 | case DIOCSETLIMIT: { | |
4049 | int old_limit; | |
4050 | ||
4051 | if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || | |
4052 | pf_pool_limits[pl->index].pp == NULL) { | |
4053 | error = EINVAL; | |
4054 | goto fail; | |
4055 | } | |
4056 | pool_sethardlimit(pf_pool_limits[pl->index].pp, | |
4057 | pl->limit, NULL, 0); | |
4058 | old_limit = pf_pool_limits[pl->index].limit; | |
4059 | pf_pool_limits[pl->index].limit = pl->limit; | |
4060 | pl->limit = old_limit; | |
4061 | break; | |
4062 | } | |
4063 | ||
4064 | default: | |
4065 | VERIFY(0); | |
4066 | /* NOTREACHED */ | |
4067 | } | |
4068 | fail: | |
4069 | return (error); | |
4070 | } | |
4071 | ||
4072 | static int | |
4073 | pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) | |
4074 | { | |
4075 | #pragma unused(p) | |
4076 | struct pf_pooladdr *pa = NULL; | |
4077 | struct pf_pool *pool = NULL; | |
4078 | int error = 0; | |
4079 | ||
4080 | switch (cmd) { | |
4081 | case DIOCBEGINADDRS: { | |
4082 | pf_empty_pool(&pf_pabuf); | |
4083 | pp->ticket = ++ticket_pabuf; | |
4084 | break; | |
4085 | } | |
4086 | ||
4087 | case DIOCADDADDR: { | |
4088 | pp->anchor[sizeof (pp->anchor) - 1] = '\0'; | |
4089 | if (pp->ticket != ticket_pabuf) { | |
4090 | error = EBUSY; | |
4091 | break; | |
4092 | } | |
4093 | #if !INET | |
4094 | if (pp->af == AF_INET) { | |
4095 | error = EAFNOSUPPORT; | |
4096 | break; | |
4097 | } | |
4098 | #endif /* INET */ | |
4099 | #if !INET6 | |
4100 | if (pp->af == AF_INET6) { | |
4101 | error = EAFNOSUPPORT; | |
4102 | break; | |
4103 | } | |
4104 | #endif /* INET6 */ | |
4105 | if (pp->addr.addr.type != PF_ADDR_ADDRMASK && | |
4106 | pp->addr.addr.type != PF_ADDR_DYNIFTL && | |
4107 | pp->addr.addr.type != PF_ADDR_TABLE) { | |
4108 | error = EINVAL; | |
4109 | break; | |
4110 | } | |
4111 | pa = pool_get(&pf_pooladdr_pl, PR_WAITOK); | |
4112 | if (pa == NULL) { | |
4113 | error = ENOMEM; | |
4114 | break; | |
4115 | } | |
4116 | pf_pooladdr_copyin(&pp->addr, pa); | |
4117 | if (pa->ifname[0]) { | |
4118 | pa->kif = pfi_kif_get(pa->ifname); | |
4119 | if (pa->kif == NULL) { | |
4120 | pool_put(&pf_pooladdr_pl, pa); | |
4121 | error = EINVAL; | |
4122 | break; | |
4123 | } | |
4124 | pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE); | |
4125 | } | |
4126 | pf_addrwrap_setup(&pa->addr); | |
4127 | if (pfi_dynaddr_setup(&pa->addr, pp->af)) { | |
4128 | pfi_dynaddr_remove(&pa->addr); | |
4129 | pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); | |
4130 | pool_put(&pf_pooladdr_pl, pa); | |
4131 | error = EINVAL; | |
4132 | break; | |
4133 | } | |
4134 | TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); | |
4135 | break; | |
4136 | } | |
4137 | ||
4138 | case DIOCGETADDRS: { | |
4139 | pp->nr = 0; | |
4140 | pp->anchor[sizeof (pp->anchor) - 1] = '\0'; | |
4141 | pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, | |
4142 | pp->r_num, 0, 1, 0); | |
4143 | if (pool == NULL) { | |
4144 | error = EBUSY; | |
4145 | break; | |
4146 | } | |
4147 | TAILQ_FOREACH(pa, &pool->list, entries) | |
4148 | pp->nr++; | |
4149 | break; | |
4150 | } | |
4151 | ||
4152 | case DIOCGETADDR: { | |
4153 | u_int32_t nr = 0; | |
4154 | ||
4155 | pp->anchor[sizeof (pp->anchor) - 1] = '\0'; | |
4156 | pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, | |
4157 | pp->r_num, 0, 1, 1); | |
4158 | if (pool == NULL) { | |
4159 | error = EBUSY; | |
4160 | break; | |
4161 | } | |
4162 | pa = TAILQ_FIRST(&pool->list); | |
4163 | while ((pa != NULL) && (nr < pp->nr)) { | |
4164 | pa = TAILQ_NEXT(pa, entries); | |
4165 | nr++; | |
4166 | } | |
4167 | if (pa == NULL) { | |
4168 | error = EBUSY; | |
4169 | break; | |
4170 | } | |
4171 | pf_pooladdr_copyout(pa, &pp->addr); | |
4172 | pfi_dynaddr_copyout(&pp->addr.addr); | |
4173 | pf_tbladdr_copyout(&pp->addr.addr); | |
4174 | pf_rtlabel_copyout(&pp->addr.addr); | |
4175 | break; | |
4176 | } | |
4177 | ||
4178 | case DIOCCHANGEADDR: { | |
4179 | struct pfioc_pooladdr *pca = pp; | |
4180 | struct pf_pooladdr *oldpa = NULL, *newpa = NULL; | |
4181 | struct pf_ruleset *ruleset; | |
4182 | ||
4183 | if (pca->action < PF_CHANGE_ADD_HEAD || | |
4184 | pca->action > PF_CHANGE_REMOVE) { | |
4185 | error = EINVAL; | |
4186 | break; | |
4187 | } | |
4188 | if (pca->addr.addr.type != PF_ADDR_ADDRMASK && | |
4189 | pca->addr.addr.type != PF_ADDR_DYNIFTL && | |
4190 | pca->addr.addr.type != PF_ADDR_TABLE) { | |
4191 | error = EINVAL; | |
4192 | break; | |
4193 | } | |
4194 | ||
4195 | pca->anchor[sizeof (pca->anchor) - 1] = '\0'; | |
4196 | ruleset = pf_find_ruleset(pca->anchor); | |
4197 | if (ruleset == NULL) { | |
4198 | error = EBUSY; | |
4199 | break; | |
4200 | } | |
4201 | pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action, | |
4202 | pca->r_num, pca->r_last, 1, 1); | |
4203 | if (pool == NULL) { | |
4204 | error = EBUSY; | |
4205 | break; | |
4206 | } | |
4207 | if (pca->action != PF_CHANGE_REMOVE) { | |
4208 | newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK); | |
4209 | if (newpa == NULL) { | |
4210 | error = ENOMEM; | |
4211 | break; | |
4212 | } | |
4213 | pf_pooladdr_copyin(&pca->addr, newpa); | |
4214 | #if !INET | |
4215 | if (pca->af == AF_INET) { | |
4216 | pool_put(&pf_pooladdr_pl, newpa); | |
4217 | error = EAFNOSUPPORT; | |
4218 | break; | |
4219 | } | |
4220 | #endif /* INET */ | |
4221 | #if !INET6 | |
4222 | if (pca->af == AF_INET6) { | |
4223 | pool_put(&pf_pooladdr_pl, newpa); | |
4224 | error = EAFNOSUPPORT; | |
4225 | break; | |
4226 | } | |
4227 | #endif /* INET6 */ | |
4228 | if (newpa->ifname[0]) { | |
4229 | newpa->kif = pfi_kif_get(newpa->ifname); | |
4230 | if (newpa->kif == NULL) { | |
4231 | pool_put(&pf_pooladdr_pl, newpa); | |
4232 | error = EINVAL; | |
4233 | break; | |
4234 | } | |
4235 | pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); | |
4236 | } else | |
4237 | newpa->kif = NULL; | |
4238 | pf_addrwrap_setup(&newpa->addr); | |
4239 | if (pfi_dynaddr_setup(&newpa->addr, pca->af) || | |
4240 | pf_tbladdr_setup(ruleset, &newpa->addr)) { | |
4241 | pfi_dynaddr_remove(&newpa->addr); | |
4242 | pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); | |
4243 | pool_put(&pf_pooladdr_pl, newpa); | |
4244 | error = EINVAL; | |
4245 | break; | |
4246 | } | |
4247 | } | |
4248 | ||
4249 | if (pca->action == PF_CHANGE_ADD_HEAD) | |
4250 | oldpa = TAILQ_FIRST(&pool->list); | |
4251 | else if (pca->action == PF_CHANGE_ADD_TAIL) | |
4252 | oldpa = TAILQ_LAST(&pool->list, pf_palist); | |
4253 | else { | |
4254 | int i = 0; | |
4255 | ||
4256 | oldpa = TAILQ_FIRST(&pool->list); | |
4257 | while ((oldpa != NULL) && (i < (int)pca->nr)) { | |
4258 | oldpa = TAILQ_NEXT(oldpa, entries); | |
4259 | i++; | |
4260 | } | |
4261 | if (oldpa == NULL) { | |
4262 | error = EINVAL; | |
4263 | break; | |
4264 | } | |
4265 | } | |
4266 | ||
4267 | if (pca->action == PF_CHANGE_REMOVE) { | |
4268 | TAILQ_REMOVE(&pool->list, oldpa, entries); | |
4269 | pfi_dynaddr_remove(&oldpa->addr); | |
4270 | pf_tbladdr_remove(&oldpa->addr); | |
4271 | pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); | |
4272 | pool_put(&pf_pooladdr_pl, oldpa); | |
4273 | } else { | |
4274 | if (oldpa == NULL) | |
4275 | TAILQ_INSERT_TAIL(&pool->list, newpa, entries); | |
4276 | else if (pca->action == PF_CHANGE_ADD_HEAD || | |
4277 | pca->action == PF_CHANGE_ADD_BEFORE) | |
4278 | TAILQ_INSERT_BEFORE(oldpa, newpa, entries); | |
4279 | else | |
4280 | TAILQ_INSERT_AFTER(&pool->list, oldpa, | |
4281 | newpa, entries); | |
4282 | } | |
4283 | ||
4284 | pool->cur = TAILQ_FIRST(&pool->list); | |
4285 | PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, | |
4286 | pca->af); | |
4287 | break; | |
4288 | } | |
4289 | ||
4290 | default: | |
4291 | VERIFY(0); | |
4292 | /* NOTREACHED */ | |
4293 | } | |
4294 | ||
4295 | return (error); | |
4296 | } | |
4297 | ||
4298 | static int | |
4299 | pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p) | |
4300 | { | |
4301 | #pragma unused(p) | |
4302 | int error = 0; | |
4303 | ||
4304 | switch (cmd) { | |
4305 | case DIOCGETRULESETS: { | |
4306 | struct pf_ruleset *ruleset; | |
4307 | struct pf_anchor *anchor; | |
4308 | ||
4309 | pr->path[sizeof (pr->path) - 1] = '\0'; | |
4310 | pr->name[sizeof (pr->name) - 1] = '\0'; | |
4311 | if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { | |
4312 | error = EINVAL; | |
4313 | break; | |
4314 | } | |
4315 | pr->nr = 0; | |
4316 | if (ruleset->anchor == NULL) { | |
4317 | /* XXX kludge for pf_main_ruleset */ | |
4318 | RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) | |
4319 | if (anchor->parent == NULL) | |
4320 | pr->nr++; | |
4321 | } else { | |
4322 | RB_FOREACH(anchor, pf_anchor_node, | |
4323 | &ruleset->anchor->children) | |
4324 | pr->nr++; | |
4325 | } | |
4326 | break; | |
4327 | } | |
4328 | ||
4329 | case DIOCGETRULESET: { | |
4330 | struct pf_ruleset *ruleset; | |
4331 | struct pf_anchor *anchor; | |
4332 | u_int32_t nr = 0; | |
4333 | ||
4334 | pr->path[sizeof (pr->path) - 1] = '\0'; | |
4335 | if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { | |
4336 | error = EINVAL; | |
4337 | break; | |
4338 | } | |
4339 | pr->name[0] = 0; | |
4340 | if (ruleset->anchor == NULL) { | |
4341 | /* XXX kludge for pf_main_ruleset */ | |
4342 | RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) | |
4343 | if (anchor->parent == NULL && nr++ == pr->nr) { | |
4344 | strlcpy(pr->name, anchor->name, | |
4345 | sizeof (pr->name)); | |
4346 | break; | |
4347 | } | |
4348 | } else { | |
4349 | RB_FOREACH(anchor, pf_anchor_node, | |
4350 | &ruleset->anchor->children) | |
4351 | if (nr++ == pr->nr) { | |
4352 | strlcpy(pr->name, anchor->name, | |
4353 | sizeof (pr->name)); | |
4354 | break; | |
4355 | } | |
4356 | } | |
4357 | if (!pr->name[0]) | |
4358 | error = EBUSY; | |
4359 | break; | |
4360 | } | |
4361 | ||
4362 | default: | |
4363 | VERIFY(0); | |
4364 | /* NOTREACHED */ | |
4365 | } | |
4366 | ||
4367 | return (error); | |
4368 | } | |
4369 | ||
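DIOCGETRULESETS / DIOCGETRULESET above enumerate child anchors much like the rule ioctls enumerate rules: the first call counts the children of a path, and each index is then fetched by name. A userland sketch that lists the anchors directly under the main ruleset, assuming the usual /dev/pf device node; the helper name is hypothetical.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: print every anchor directly under the main ruleset. */
static int
list_anchors(void)
{
	struct pfioc_ruleset prs;
	u_int32_t i, total;
	int dev;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		return (-1);

	memset(&prs, 0, sizeof (prs));	/* empty path == main ruleset */
	if (ioctl(dev, DIOCGETRULESETS, &prs) == -1) {
		close(dev);
		return (-1);
	}
	total = prs.nr;
	for (i = 0; i < total; i++) {
		prs.nr = i;
		if (ioctl(dev, DIOCGETRULESET, &prs) == -1)
			break;
		printf("anchor \"%s\"\n", prs.name);
	}
	close(dev);
	return (0);
}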
4370 | static int | |
4371 | pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, | |
4372 | struct pfioc_trans_64 *io64, struct proc *p) | |
4373 | { | |
4374 | int p64 = proc_is64bit(p); | |
4375 | int error = 0, esize, size; | |
4376 | user_addr_t buf; | |
4377 | ||
4378 | esize = (p64 ? io64->esize : io32->esize); | |
4379 | size = (p64 ? io64->size : io32->size); | |
4380 | buf = (p64 ? io64->array : io32->array); | |
4381 | ||
4382 | switch (cmd) { | |
4383 | case DIOCXBEGIN: { | |
4384 | struct pfioc_trans_e *ioe; | |
4385 | struct pfr_table *table; | |
4386 | int i; | |
4387 | ||
4388 | if (esize != sizeof (*ioe)) { | |
4389 | error = ENODEV; | |
4390 | goto fail; | |
4391 | } | |
4392 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
4393 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
4394 | for (i = 0; i < size; i++, buf += sizeof (*ioe)) { | |
4395 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
4396 | _FREE(table, M_TEMP); | |
4397 | _FREE(ioe, M_TEMP); | |
4398 | error = EFAULT; | |
4399 | goto fail; | |
4400 | } | |
4401 | ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; | |
4402 | switch (ioe->rs_num) { | |
4403 | case PF_RULESET_ALTQ: | |
4404 | #if PF_ALTQ | |
4405 | if (altq_allowed) { | |
4406 | if (ioe->anchor[0]) { | |
4407 | _FREE(table, M_TEMP); | |
4408 | _FREE(ioe, M_TEMP); | |
4409 | error = EINVAL; | |
4410 | goto fail; | |
4411 | } | |
4412 | error = pf_begin_altq(&ioe->ticket); | |
4413 | if (error != 0) { | |
4414 | _FREE(table, M_TEMP); | |
4415 | _FREE(ioe, M_TEMP); | |
4416 | goto fail; | |
4417 | } | |
4418 | } | |
4419 | #endif /* PF_ALTQ */ | |
4420 | break; | |
4421 | case PF_RULESET_TABLE: | |
4422 | bzero(table, sizeof (*table)); | |
4423 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
4424 | sizeof (table->pfrt_anchor)); | |
4425 | if ((error = pfr_ina_begin(table, | |
4426 | &ioe->ticket, NULL, 0))) { | |
4427 | _FREE(table, M_TEMP); | |
4428 | _FREE(ioe, M_TEMP); | |
4429 | goto fail; | |
4430 | } | |
4431 | break; | |
4432 | default: | |
4433 | if ((error = pf_begin_rules(&ioe->ticket, | |
4434 | ioe->rs_num, ioe->anchor))) { | |
4435 | _FREE(table, M_TEMP); | |
4436 | _FREE(ioe, M_TEMP); | |
4437 | goto fail; | |
4438 | } | |
4439 | break; | |
4440 | } | |
4441 | if (copyout(ioe, buf, sizeof (*ioe))) { | |
4442 | _FREE(table, M_TEMP); | |
4443 | _FREE(ioe, M_TEMP); | |
4444 | error = EFAULT; | |
4445 | goto fail; | |
4446 | } | |
4447 | } | |
4448 | _FREE(table, M_TEMP); | |
4449 | _FREE(ioe, M_TEMP); | |
4450 | break; | |
4451 | } | |
4452 | ||
4453 | case DIOCXROLLBACK: { | |
4454 | struct pfioc_trans_e *ioe; | |
4455 | struct pfr_table *table; | |
4456 | int i; | |
4457 | ||
4458 | if (esize != sizeof (*ioe)) { | |
4459 | error = ENODEV; | |
4460 | goto fail; | |
4461 | } | |
4462 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
4463 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
4464 | for (i = 0; i < size; i++, buf += sizeof (*ioe)) { | |
4465 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
4466 | _FREE(table, M_TEMP); | |
4467 | _FREE(ioe, M_TEMP); | |
4468 | error = EFAULT; | |
4469 | goto fail; | |
4470 | } | |
4471 | ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; | |
4472 | switch (ioe->rs_num) { | |
4473 | case PF_RULESET_ALTQ: | |
4474 | #if PF_ALTQ | |
4475 | if (altq_allowed) { | |
4476 | if (ioe->anchor[0]) { | |
4477 | _FREE(table, M_TEMP); | |
4478 | _FREE(ioe, M_TEMP); | |
4479 | error = EINVAL; | |
4480 | goto fail; | |
4481 | } | |
4482 | error = pf_rollback_altq(ioe->ticket); | |
4483 | if (error != 0) { | |
4484 | _FREE(table, M_TEMP); | |
4485 | _FREE(ioe, M_TEMP); | |
4486 | goto fail; /* really bad */ | |
4487 | } | |
4488 | } | |
4489 | #endif /* PF_ALTQ */ | |
4490 | break; | |
4491 | case PF_RULESET_TABLE: | |
4492 | bzero(table, sizeof (*table)); | |
4493 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
4494 | sizeof (table->pfrt_anchor)); | |
4495 | if ((error = pfr_ina_rollback(table, | |
4496 | ioe->ticket, NULL, 0))) { | |
4497 | _FREE(table, M_TEMP); | |
4498 | _FREE(ioe, M_TEMP); | |
4499 | goto fail; /* really bad */ | |
4500 | } | |
4501 | break; | |
4502 | default: | |
4503 | if ((error = pf_rollback_rules(ioe->ticket, | |
4504 | ioe->rs_num, ioe->anchor))) { | |
4505 | _FREE(table, M_TEMP); | |
4506 | _FREE(ioe, M_TEMP); | |
4507 | goto fail; /* really bad */ | |
4508 | } | |
4509 | break; | |
4510 | } | |
4511 | } | |
4512 | _FREE(table, M_TEMP); | |
4513 | _FREE(ioe, M_TEMP); | |
4514 | break; | |
4515 | } | |
4516 | ||
4517 | case DIOCXCOMMIT: { | |
4518 | struct pfioc_trans_e *ioe; | |
4519 | struct pfr_table *table; | |
4520 | struct pf_ruleset *rs; | |
4521 | user_addr_t _buf = buf; | |
4522 | int i; | |
4523 | ||
4524 | if (esize != sizeof (*ioe)) { | |
4525 | error = ENODEV; | |
4526 | goto fail; | |
4527 | } | |
4528 | ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); | |
4529 | table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); | |
4530 | /* first makes sure everything will succeed */ | |
4531 | for (i = 0; i < size; i++, buf += sizeof (*ioe)) { | |
4532 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
4533 | _FREE(table, M_TEMP); | |
4534 | _FREE(ioe, M_TEMP); | |
4535 | error = EFAULT; | |
4536 | goto fail; | |
4537 | } | |
4538 | ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; | |
4539 | switch (ioe->rs_num) { | |
4540 | case PF_RULESET_ALTQ: | |
4541 | #if PF_ALTQ | |
4542 | if (altq_allowed) { | |
4543 | if (ioe->anchor[0]) { | |
4544 | _FREE(table, M_TEMP); | |
4545 | _FREE(ioe, M_TEMP); | |
4546 | error = EINVAL; | |
4547 | goto fail; | |
4548 | } | |
4549 | if (!altqs_inactive_open || | |
4550 | ioe->ticket != | |
4551 | ticket_altqs_inactive) { | |
4552 | _FREE(table, M_TEMP); | |
4553 | _FREE(ioe, M_TEMP); | |
4554 | error = EBUSY; | |
4555 | goto fail; | |
4556 | } | |
4557 | } | |
4558 | #endif /* PF_ALTQ */ | |
4559 | break; | |
4560 | case PF_RULESET_TABLE: | |
4561 | rs = pf_find_ruleset(ioe->anchor); | |
4562 | if (rs == NULL || !rs->topen || ioe->ticket != | |
4563 | rs->tticket) { | |
4564 | _FREE(table, M_TEMP); | |
4565 | _FREE(ioe, M_TEMP); | |
4566 | error = EBUSY; | |
4567 | goto fail; | |
4568 | } | |
4569 | break; | |
4570 | default: | |
4571 | if (ioe->rs_num < 0 || ioe->rs_num >= | |
4572 | PF_RULESET_MAX) { | |
4573 | _FREE(table, M_TEMP); | |
4574 | _FREE(ioe, M_TEMP); | |
4575 | error = EINVAL; | |
4576 | goto fail; | |
4577 | } | |
4578 | rs = pf_find_ruleset(ioe->anchor); | |
4579 | if (rs == NULL || | |
4580 | !rs->rules[ioe->rs_num].inactive.open || | |
4581 | rs->rules[ioe->rs_num].inactive.ticket != | |
4582 | ioe->ticket) { | |
4583 | _FREE(table, M_TEMP); | |
4584 | _FREE(ioe, M_TEMP); | |
4585 | error = EBUSY; | |
4586 | goto fail; | |
4587 | } | |
4588 | break; | |
4589 | } | |
4590 | } | |
4591 | buf = _buf; | |
4592 | /* now do the commit - no errors should happen here */ | |
4593 | for (i = 0; i < size; i++, buf += sizeof (*ioe)) { | |
4594 | if (copyin(buf, ioe, sizeof (*ioe))) { | |
4595 | _FREE(table, M_TEMP); | |
4596 | _FREE(ioe, M_TEMP); | |
4597 | error = EFAULT; | |
4598 | goto fail; | |
4599 | } | |
4600 | ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; | |
4601 | switch (ioe->rs_num) { | |
4602 | case PF_RULESET_ALTQ: | |
4603 | #if PF_ALTQ | |
4604 | if (altq_allowed && | |
4605 | (error = pf_commit_altq(ioe->ticket))) { | |
4606 | _FREE(table, M_TEMP); | |
4607 | _FREE(ioe, M_TEMP); | |
4608 | goto fail; /* really bad */ | |
4609 | } | |
4610 | #endif /* PF_ALTQ */ | |
4611 | break; | |
4612 | case PF_RULESET_TABLE: | |
4613 | bzero(table, sizeof (*table)); | |
4614 | strlcpy(table->pfrt_anchor, ioe->anchor, | |
4615 | sizeof (table->pfrt_anchor)); | |
4616 | if ((error = pfr_ina_commit(table, ioe->ticket, | |
4617 | NULL, NULL, 0))) { | |
4618 | _FREE(table, M_TEMP); | |
4619 | _FREE(ioe, M_TEMP); | |
4620 | goto fail; /* really bad */ | |
4621 | } | |
4622 | break; | |
4623 | default: | |
4624 | if ((error = pf_commit_rules(ioe->ticket, | |
4625 | ioe->rs_num, ioe->anchor))) { | |
4626 | _FREE(table, M_TEMP); | |
4627 | _FREE(ioe, M_TEMP); | |
4628 | goto fail; /* really bad */ | |
4629 | } | |
4630 | break; | |
4631 | } | |
4632 | } | |
4633 | _FREE(table, M_TEMP); | |
4634 | _FREE(ioe, M_TEMP); | |
4635 | break; | |
4636 | } | |
4637 | ||
4638 | default: | |
4639 | VERIFY(0); | |
4640 | /* NOTREACHED */ | |
4641 | } | |
4642 | fail: | |
4643 | return (error); | |
4644 | } | |
4645 | ||
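DIOCXBEGIN / DIOCXCOMMIT above drive pf's two-phase ruleset replacement: XBEGIN returns one ticket per transaction element, the replacement rules or tables are loaded against those tickets, and XCOMMIT re-validates every ticket before swapping anything in, with XROLLBACK available to abandon the inactive sets. A reduced userland sketch with a single filter-ruleset element, assuming the usual /dev/pf device node; the rule loading between the two steps is elided and the helper name is hypothetical.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: open and commit a one-element rules transaction. */
static int
replace_filter_rules(void)
{
	struct pfioc_trans trans;
	struct pfioc_trans_e te;
	int dev;

	if ((dev = open("/dev/pf", O_RDWR)) == -1)
		return (-1);

	memset(&te, 0, sizeof (te));
	te.rs_num = PF_RULESET_FILTER;	/* empty anchor == main ruleset */

	memset(&trans, 0, sizeof (trans));
	trans.size = 1;
	trans.esize = sizeof (te);
	trans.array = &te;

	if (ioctl(dev, DIOCXBEGIN, &trans) == -1)
		goto bad;
	/*
	 * te.ticket is now valid; the new rules would be loaded here
	 * (e.g. DIOCADDRULE against the inactive ruleset), and a failure
	 * at this point would be undone with DIOCXROLLBACK on the same
	 * array instead of committing.
	 */
	if (ioctl(dev, DIOCXCOMMIT, &trans) == -1)
		goto bad;
	close(dev);
	return (0);
bad:
	close(dev);
	return (-1);
}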
4646 | static int | |
4647 | pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32, | |
4648 | struct pfioc_src_nodes_64 *psn64, struct proc *p) | |
4649 | { | |
4650 | int p64 = proc_is64bit(p); | |
4651 | int error = 0; | |
4652 | ||
4653 | switch (cmd) { | |
4654 | case DIOCGETSRCNODES: { | |
4655 | struct pf_src_node *n, *pstore; | |
4656 | user_addr_t buf; | |
4657 | u_int32_t nr = 0; | |
4658 | int space, size; | |
4659 | ||
4660 | space = (p64 ? psn64->psn_len : psn32->psn_len); | |
4661 | if (space == 0) { | |
4662 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) | |
4663 | nr++; | |
4664 | ||
4665 | size = sizeof (struct pf_src_node) * nr; | |
4666 | if (p64) | |
4667 | psn64->psn_len = size; | |
4668 | else | |
4669 | psn32->psn_len = size; | |
4670 | break; | |
4671 | } | |
4672 | ||
4673 | pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK); | |
4674 | if (pstore == NULL) { | |
4675 | error = ENOMEM; | |
4676 | break; | |
4677 | } | |
4678 | buf = (p64 ? psn64->psn_buf : psn32->psn_buf); | |
4679 | ||
4680 | RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { | |
4681 | uint64_t secs = pf_time_second(), diff; | |
4682 | ||
4683 | if ((nr + 1) * sizeof (*pstore) > (unsigned)space) | |
4684 | break; | |
4685 | ||
4686 | bcopy(n, pstore, sizeof (*pstore)); | |
4687 | if (n->rule.ptr != NULL) | |
4688 | pstore->rule.nr = n->rule.ptr->nr; | |
4689 | pstore->creation = secs - pstore->creation; | |
4690 | if (pstore->expire > secs) | |
4691 | pstore->expire -= secs; | |
4692 | else | |
4693 | pstore->expire = 0; | |
4694 | ||
4695 | /* adjust the connection rate estimate */ | |
4696 | diff = secs - n->conn_rate.last; | |
4697 | if (diff >= n->conn_rate.seconds) | |
4698 | pstore->conn_rate.count = 0; | |
4699 | else | |
4700 | pstore->conn_rate.count -= | |
4701 | n->conn_rate.count * diff / | |
4702 | n->conn_rate.seconds; | |
4703 | ||
4704 | _RB_PARENT(pstore, entry) = NULL; | |
4705 | RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL; | |
4706 | pstore->kif = NULL; | |
4707 | ||
4708 | error = copyout(pstore, buf, sizeof (*pstore)); | |
4709 | if (error) { | |
4710 | _FREE(pstore, M_TEMP); | |
4711 | goto fail; | |
4712 | } | |
4713 | buf += sizeof (*pstore); | |
4714 | nr++; | |
4715 | } | |
4716 | ||
4717 | size = sizeof (struct pf_src_node) * nr; | |
4718 | if (p64) | |
4719 | psn64->psn_len = size; | |
4720 | else | |
4721 | psn32->psn_len = size; | |
4722 | ||
4723 | _FREE(pstore, M_TEMP); | |
4724 | break; | |
4725 | } | |
4726 | ||
4727 | default: | |
4728 | VERIFY(0); | |
4729 | /* NOTREACHED */ | |
4730 | } | |
4731 | fail: | |
4732 | return (error); | |
4734 | } | |
4735 | ||
4736 | static int | |
4737 | pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, | |
4738 | struct proc *p) | |
4739 | { | |
4740 | #pragma unused(p) | |
4741 | int error = 0; | |
4742 | ||
4743 | switch (cmd) { | |
4744 | case DIOCKILLSRCNODES: { | |
4745 | struct pf_src_node *sn; | |
4746 | struct pf_state *s; | |
4747 | int killed = 0; | |
4748 | ||
4749 | RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { | |
4750 | if (PF_MATCHA(psnk->psnk_src.neg, | |
4751 | &psnk->psnk_src.addr.v.a.addr, | |
4752 | &psnk->psnk_src.addr.v.a.mask, | |
4753 | &sn->addr, sn->af) && | |
4754 | PF_MATCHA(psnk->psnk_dst.neg, | |
4755 | &psnk->psnk_dst.addr.v.a.addr, | |
4756 | &psnk->psnk_dst.addr.v.a.mask, | |
4757 | &sn->raddr, sn->af)) { | |
4758 | /* Handle state to src_node linkage */ | |
4759 | if (sn->states != 0) { | |
4760 | RB_FOREACH(s, pf_state_tree_id, | |
4761 | &tree_id) { | |
4762 | if (s->src_node == sn) | |
4763 | s->src_node = NULL; | |
4764 | if (s->nat_src_node == sn) | |
4765 | s->nat_src_node = NULL; | |
4766 | } | |
4767 | sn->states = 0; | |
4768 | } | |
4769 | sn->expire = 1; | |
4770 | killed++; | |
4771 | } | |
4772 | } | |
4773 | ||
4774 | if (killed > 0) | |
4775 | pf_purge_expired_src_nodes(); | |
4776 | ||
4777 | psnk->psnk_af = killed; | |
4778 | break; | |
4779 | } | |
4780 | ||
4781 | default: | |
4782 | VERIFY(0); | |
4783 | /* NOTREACHED */ | |
4784 | } | |
4785 | ||
4786 | return (error); | |
4787 | } | |
4788 | ||
4789 | static int | |
4790 | pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32, | |
4791 | struct pfioc_iface_64 *io64, struct proc *p) | |
4792 | { | |
4793 | int p64 = proc_is64bit(p); | |
4794 | int error = 0; | |
4795 | ||
4796 | switch (cmd) { | |
4797 | case DIOCIGETIFACES: { | |
4798 | user_addr_t buf; | |
4799 | int esize; | |
4800 | ||
4801 | buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer); | |
4802 | esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize); | |
4803 | ||
4804 | /* esize must be that of the user space version of pfi_kif */ | |
4805 | if (esize != sizeof (struct pfi_uif)) { | |
4806 | error = ENODEV; | |
4807 | break; | |
4808 | } | |
4809 | if (p64) | |
4810 | io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; | |
4811 | else | |
4812 | io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; | |
4813 | error = pfi_get_ifaces( | |
4814 | p64 ? io64->pfiio_name : io32->pfiio_name, buf, | |
4815 | p64 ? &io64->pfiio_size : &io32->pfiio_size); | |
4816 | break; | |
4817 | } | |
4818 | ||
4819 | case DIOCSETIFFLAG: { | |
4820 | if (p64) | |
4821 | io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; | |
4822 | else | |
4823 | io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; | |
4824 | ||
4825 | error = pfi_set_flags( | |
4826 | p64 ? io64->pfiio_name : io32->pfiio_name, | |
4827 | p64 ? io64->pfiio_flags : io32->pfiio_flags); | |
4828 | break; | |
4829 | } | |
4830 | ||
4831 | case DIOCCLRIFFLAG: { | |
4832 | if (p64) | |
4833 | io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; | |
4834 | else | |
4835 | io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; | |
4836 | ||
4837 | error = pfi_clear_flags( | |
4838 | p64 ? io64->pfiio_name : io32->pfiio_name, | |
4839 | p64 ? io64->pfiio_flags : io32->pfiio_flags); | |
4840 | break; | |
4841 | } | |
4842 | ||
4843 | default: | |
4844 | VERIFY(0); | |
4845 | /* NOTREACHED */ | |
4846 | } | |
4847 | ||
4848 | return (error); | |
4849 | } | |
4850 | ||
4851 | int | |
4852 | pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp, | |
4853 | unsigned int af, int input, struct ip_fw_args *fwa) | |
4854 | { | |
4855 | int error = 0; | |
4856 | struct mbuf *nextpkt; | |
4857 | net_thread_marks_t marks; | |
4858 | struct ifnet * pf_ifp = ifp; | |
4859 | ||
4860 | /* Always allow traffic on co-processor interfaces. */ | |
4861 | if (ifp && IFNET_IS_INTCOPROC(ifp)) | |
4862 | return (0); | |
4863 | ||
4864 | marks = net_thread_marks_push(NET_THREAD_HELD_PF); | |
4865 | ||
4866 | if (marks != net_thread_marks_none) { | |
4867 | lck_rw_lock_shared(pf_perim_lock); | |
4868 | if (!pf_is_enabled) | |
4869 | goto done; | |
4870 | lck_mtx_lock(pf_lock); | |
4871 | } | |
4872 | ||
4873 | if (mppn != NULL && *mppn != NULL) | |
4874 | VERIFY(*mppn == *mp); | |
4875 | if ((nextpkt = (*mp)->m_nextpkt) != NULL) | |
4876 | (*mp)->m_nextpkt = NULL; | |
4877 | ||
4878 | /* | |
4879 | * For packets destined to a locally hosted IP address, | |
4880 | * ip_output_list sets the mbuf packet header's rcvif to | |
4881 | * the interface hosting the IP address. | |
4882 | * On the output path the ifp passed to pf_af_hook for | |
4883 | * such local communication is the loopback interface, | |
4884 | * while the input path derives ifp from the mbuf packet | |
4885 | * header's rcvif. | |
4886 | * This asymmetry causes issues with PF. | |
4887 | * To handle that case, we have a limited change here to | |
4888 | * pass the interface as loopback if packets are looped in. | |
4889 | */ | |
4890 | if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) { | |
4891 | pf_ifp = lo_ifp; | |
4892 | } | |
4893 | ||
4894 | switch (af) { | |
4895 | #if INET | |
4896 | case AF_INET: { | |
4897 | error = pf_inet_hook(pf_ifp, mp, input, fwa); | |
4898 | break; | |
4899 | } | |
4900 | #endif /* INET */ | |
4901 | #if INET6 | |
4902 | case AF_INET6: | |
4903 | error = pf_inet6_hook(pf_ifp, mp, input, fwa); | |
4904 | break; | |
4905 | #endif /* INET6 */ | |
4906 | default: | |
4907 | break; | |
4908 | } | |
4909 | ||
4910 | /* When packet valid, link to the next packet */ | |
4911 | if (*mp != NULL && nextpkt != NULL) { | |
4912 | struct mbuf *m = *mp; | |
4913 | while (m->m_nextpkt != NULL) | |
4914 | m = m->m_nextpkt; | |
4915 | m->m_nextpkt = nextpkt; | |
4916 | } | |
4917 | /* Fix up linkage of previous packet in the chain */ | |
4918 | if (mppn != NULL) { | |
4919 | if (*mp != NULL) | |
4920 | *mppn = *mp; | |
4921 | else | |
4922 | *mppn = nextpkt; | |
4923 | } | |
4924 | ||
4925 | if (marks != net_thread_marks_none) | |
4926 | lck_mtx_unlock(pf_lock); | |
4927 | ||
4928 | done: | |
4929 | if (marks != net_thread_marks_none) | |
4930 | lck_rw_done(pf_perim_lock); | |
4931 | ||
4932 | net_thread_marks_pop(marks); | |
4933 | return (error); | |
4934 | } | |
4935 | ||
4936 | ||
4937 | #if INET | |
4938 | static int | |
4939 | pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input, | |
4940 | struct ip_fw_args *fwa) | |
4941 | { | |
4942 | struct mbuf *m = *mp; | |
4943 | #if BYTE_ORDER != BIG_ENDIAN | |
4944 | struct ip *ip = mtod(m, struct ip *); | |
4945 | #endif | |
4946 | int error = 0; | |
4947 | ||
4948 | /* | |
4949 | * If the packet is outbound, originated locally, is flagged for | |
4950 | * delayed UDP/TCP checksum calculation, and is about to be processed | |
4951 | * for an interface that doesn't support the appropriate checksum | |
4952 | * offloading, then calculate the checksum here so that PF can adjust | |
4953 | * it properly. | |
4954 | */ | |
4955 | if (!input && m->m_pkthdr.rcvif == NULL) { | |
4956 | static const int mask = CSUM_DELAY_DATA; | |
4957 | const int flags = m->m_pkthdr.csum_flags & | |
4958 | ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist); | |
4959 | ||
4960 | if (flags & mask) { | |
4961 | in_delayed_cksum(m); | |
4962 | m->m_pkthdr.csum_flags &= ~mask; | |
4963 | } | |
4964 | } | |
4965 | ||
4966 | #if BYTE_ORDER != BIG_ENDIAN | |
4967 | HTONS(ip->ip_len); | |
4968 | HTONS(ip->ip_off); | |
4969 | #endif | |
4970 | if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) { | |
4971 | if (*mp != NULL) { | |
4972 | m_freem(*mp); | |
4973 | *mp = NULL; | |
4974 | error = EHOSTUNREACH; | |
4975 | } else { | |
4976 | error = ENOBUFS; | |
4977 | } | |
4978 | } | |
4979 | #if BYTE_ORDER != BIG_ENDIAN | |
4980 | else { | |
4981 | if (*mp != NULL) { | |
4982 | ip = mtod(*mp, struct ip *); | |
4983 | NTOHS(ip->ip_len); | |
4984 | NTOHS(ip->ip_off); | |
4985 | } | |
4986 | } | |
4987 | #endif | |
4988 | return (error); | |
4989 | } | |
4990 | #endif /* INET */ | |
4991 | ||
4992 | #if INET6 | |
4993 | int | |
4994 | pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input, | |
4995 | struct ip_fw_args *fwa) | |
4996 | { | |
4997 | int error = 0; | |
4998 | ||
4999 | /* | |
5000 | * If the packet is outbound, originated locally, is flagged for | |
5001 | * delayed UDP/TCP checksum calculation, and is about to be processed | |
5002 | * for an interface that doesn't support the appropriate checksum | |
5003 | * offloading, then calculate the checksum here so that PF can adjust | |
5004 | * it properly. | |
5005 | */ | |
5006 | if (!input && (*mp)->m_pkthdr.rcvif == NULL) { | |
5007 | static const int mask = CSUM_DELAY_IPV6_DATA; | |
5008 | const int flags = (*mp)->m_pkthdr.csum_flags & | |
5009 | ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist); | |
5010 | ||
5011 | if (flags & mask) { | |
5012 | /* | |
5013 | * Checksum offload should not have been enabled | |
5014 | * when extension headers exist, thus 0 for optlen. | |
5015 | */ | |
5016 | in6_delayed_cksum(*mp); | |
5017 | (*mp)->m_pkthdr.csum_flags &= ~mask; | |
5018 | } | |
5019 | } | |
5020 | ||
5021 | if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) { | |
5022 | if (*mp != NULL) { | |
5023 | m_freem(*mp); | |
5024 | *mp = NULL; | |
5025 | error = EHOSTUNREACH; | |
5026 | } else { | |
5027 | error = ENOBUFS; | |
5028 | } | |
5029 | } | |
5030 | return (error); | |
5031 | } | |
5032 | #endif /* INET6 */ | |
5033 | ||
5034 | int | |
5035 | pf_ifaddr_hook(struct ifnet *ifp) | |
5036 | { | |
5037 | struct pfi_kif *kif = ifp->if_pf_kif; | |
5038 | ||
5039 | if (kif != NULL) { | |
5040 | lck_rw_lock_shared(pf_perim_lock); | |
5041 | lck_mtx_lock(pf_lock); | |
5042 | ||
5043 | pfi_kifaddr_update(kif); | |
5044 | ||
5045 | lck_mtx_unlock(pf_lock); | |
5046 | lck_rw_done(pf_perim_lock); | |
5047 | } | |
5048 | return (0); | |
5049 | } | |
5050 | ||
5051 | /* | |
5052 | * Caller acquires dlil lock as writer (exclusive) | |
5053 | */ | |
5054 | void | |
5055 | pf_ifnet_hook(struct ifnet *ifp, int attach) | |
5056 | { | |
5057 | lck_rw_lock_shared(pf_perim_lock); | |
5058 | lck_mtx_lock(pf_lock); | |
5059 | if (attach) | |
5060 | pfi_attach_ifnet(ifp); | |
5061 | else | |
5062 | pfi_detach_ifnet(ifp); | |
5063 | lck_mtx_unlock(pf_lock); | |
5064 | lck_rw_done(pf_perim_lock); | |
5065 | } | |
5066 | ||
5067 | static void | |
5068 | pf_attach_hooks(void) | |
5069 | { | |
5070 | ifnet_head_lock_shared(); | |
5071 | /* | |
5072 | * Check against ifnet_addrs[] before proceeding, in case this | |
5073 | * is called very early on, e.g. during dlil_init() before any | |
5074 | * network interface is attached. | |
5075 | */ | |
5076 | if (ifnet_addrs != NULL) { | |
5077 | int i; | |
5078 | ||
5079 | for (i = 0; i <= if_index; i++) { | |
5080 | struct ifnet *ifp = ifindex2ifnet[i]; | |
5081 | if (ifp != NULL) { | |
5082 | pfi_attach_ifnet(ifp); | |
5083 | } | |
5084 | } | |
5085 | } | |
5086 | ifnet_head_done(); | |
5087 | } | |
5088 | ||
5089 | #if 0 | |
5090 | /* currently unused along with pfdetach() */ | |
5091 | static void | |
5092 | pf_detach_hooks(void) | |
5093 | { | |
5094 | ifnet_head_lock_shared(); | |
5095 | if (ifnet_addrs != NULL) { | |
5096 | int i; | |
5097 | ||
5098 | for (i = 0; i <= if_index; i++) { | |
5099 | struct ifnet *ifp = ifindex2ifnet[i]; | |
5100 | if (ifp != NULL && ifp->if_pf_kif != NULL) { | |
5101 | pfi_detach_ifnet(ifp); | |
5102 | } | |
5103 | } | |
5104 | } | |
5105 | ifnet_head_done(); | |
5106 | } | |
5107 | #endif | |
5108 | ||
5109 | /* | |
5110 | * 'D' group ioctls. | |
5111 | * | |
5112 | * The switch statement below does nothing at runtime; it serves as a | |
5113 | * compile time check to ensure that all of the socket 'D' ioctls (those | |
5114 | * in the 'D' group going through soo_ioctl) made available by the | |
5115 | * networking stack are unique. This works as long as this routine gets | |
5116 | * updated each time a new interface ioctl gets added. | |
5117 | * | |
5118 | * Any failure at compile time indicates duplicated ioctl values. | |
5119 | */ | |
5120 | static __attribute__((unused)) void | |
5121 | pfioctl_cassert(void) | |
5122 | { | |
5123 | /* | |
5124 | * This is equivalent to _CASSERT(); the compiler generates no | |
5125 | * instructions for it, so the check happens at compile time only. | |
5126 | */ | |
5127 | switch ((u_long)0) { | |
5128 | case 0: | |
5129 | ||
5130 | /* bsd/net/pfvar.h */ | |
5131 | case DIOCSTART: | |
5132 | case DIOCSTOP: | |
5133 | case DIOCADDRULE: | |
5134 | case DIOCGETSTARTERS: | |
5135 | case DIOCGETRULES: | |
5136 | case DIOCGETRULE: | |
5137 | case DIOCSTARTREF: | |
5138 | case DIOCSTOPREF: | |
5139 | case DIOCCLRSTATES: | |
5140 | case DIOCGETSTATE: | |
5141 | case DIOCSETSTATUSIF: | |
5142 | case DIOCGETSTATUS: | |
5143 | case DIOCCLRSTATUS: | |
5144 | case DIOCNATLOOK: | |
5145 | case DIOCSETDEBUG: | |
5146 | case DIOCGETSTATES: | |
5147 | case DIOCCHANGERULE: | |
5148 | case DIOCINSERTRULE: | |
5149 | case DIOCDELETERULE: | |
5150 | case DIOCSETTIMEOUT: | |
5151 | case DIOCGETTIMEOUT: | |
5152 | case DIOCADDSTATE: | |
5153 | case DIOCCLRRULECTRS: | |
5154 | case DIOCGETLIMIT: | |
5155 | case DIOCSETLIMIT: | |
5156 | case DIOCKILLSTATES: | |
5157 | case DIOCSTARTALTQ: | |
5158 | case DIOCSTOPALTQ: | |
5159 | case DIOCADDALTQ: | |
5160 | case DIOCGETALTQS: | |
5161 | case DIOCGETALTQ: | |
5162 | case DIOCCHANGEALTQ: | |
5163 | case DIOCGETQSTATS: | |
5164 | case DIOCBEGINADDRS: | |
5165 | case DIOCADDADDR: | |
5166 | case DIOCGETADDRS: | |
5167 | case DIOCGETADDR: | |
5168 | case DIOCCHANGEADDR: | |
5169 | case DIOCGETRULESETS: | |
5170 | case DIOCGETRULESET: | |
5171 | case DIOCRCLRTABLES: | |
5172 | case DIOCRADDTABLES: | |
5173 | case DIOCRDELTABLES: | |
5174 | case DIOCRGETTABLES: | |
5175 | case DIOCRGETTSTATS: | |
5176 | case DIOCRCLRTSTATS: | |
5177 | case DIOCRCLRADDRS: | |
5178 | case DIOCRADDADDRS: | |
5179 | case DIOCRDELADDRS: | |
5180 | case DIOCRSETADDRS: | |
5181 | case DIOCRGETADDRS: | |
5182 | case DIOCRGETASTATS: | |
5183 | case DIOCRCLRASTATS: | |
5184 | case DIOCRTSTADDRS: | |
5185 | case DIOCRSETTFLAGS: | |
5186 | case DIOCRINADEFINE: | |
5187 | case DIOCOSFPFLUSH: | |
5188 | case DIOCOSFPADD: | |
5189 | case DIOCOSFPGET: | |
5190 | case DIOCXBEGIN: | |
5191 | case DIOCXCOMMIT: | |
5192 | case DIOCXROLLBACK: | |
5193 | case DIOCGETSRCNODES: | |
5194 | case DIOCCLRSRCNODES: | |
5195 | case DIOCSETHOSTID: | |
5196 | case DIOCIGETIFACES: | |
5197 | case DIOCSETIFFLAG: | |
5198 | case DIOCCLRIFFLAG: | |
5199 | case DIOCKILLSRCNODES: | |
5200 | case DIOCGIFSPEED: | |
5201 | ; | |
5202 | } | |
5203 | } |