]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pf_ioctl.c
xnu-2050.18.24.tar.gz
[apple/xnu.git] / bsd / net / pf_ioctl.c
1 /*
2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83
84 #include <mach/vm_param.h>
85
86 #include <net/dlil.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/route.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_var.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/ip_icmp.h>
97 #include <netinet/if_ether.h>
98
99 #if DUMMYNET
100 #include <netinet/ip_dummynet.h>
101 #else
102 struct ip_fw_args;
103 #endif /* DUMMYNET */
104
105 #include <libkern/crypto/md5.h>
106
107 #include <machine/machine_routines.h>
108
109 #include <miscfs/devfs/devfs.h>
110
111 #include <net/pfvar.h>
112
113 #if NPFSYNC
114 #include <net/if_pfsync.h>
115 #endif /* NPFSYNC */
116
117 #if PFLOG
118 #include <net/if_pflog.h>
119 #endif /* PFLOG */
120
121 #if INET6
122 #include <netinet/ip6.h>
123 #include <netinet/in_pcb.h>
124 #endif /* INET6 */
125
126 #if PF_ALTQ
127 #include <net/altq/altq.h>
128 #include <net/altq/altq_cbq.h>
129 #include <net/classq/classq_red.h>
130 #include <net/classq/classq_rio.h>
131 #include <net/classq/classq_blue.h>
132 #include <net/classq/classq_sfb.h>
133 #endif /* PF_ALTQ */
134
135 #if 0
136 static void pfdetach(void);
137 #endif
138 static int pfopen(dev_t, int, int, struct proc *);
139 static int pfclose(dev_t, int, int, struct proc *);
140 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
141 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
142 struct pfioc_table_64 *, struct proc *);
143 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
144 struct pfioc_tokens_64 *, struct proc *);
145 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
146 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
147 struct proc *);
148 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
149 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
150 struct pfioc_states_64 *, struct proc *);
151 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
152 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
153 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
154 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
155 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
156 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
157 struct pfioc_trans_64 *, struct proc *);
158 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
159 struct pfioc_src_nodes_64 *, struct proc *);
160 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
161 struct proc *);
162 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
163 struct pfioc_iface_64 *, struct proc *);
164 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
165 u_int8_t, u_int8_t, u_int8_t);
166 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
167 static void pf_empty_pool(struct pf_palist *);
168 #if PF_ALTQ
169 static int pf_begin_altq(u_int32_t *);
170 static int pf_rollback_altq(u_int32_t);
171 static int pf_commit_altq(u_int32_t);
172 static int pf_enable_altq(struct pf_altq *);
173 static int pf_disable_altq(struct pf_altq *);
174 static void pf_altq_copyin(struct pf_altq *, struct pf_altq *);
175 static void pf_altq_copyout(struct pf_altq *, struct pf_altq *);
176 #endif /* PF_ALTQ */
177 static int pf_begin_rules(u_int32_t *, int, const char *);
178 static int pf_rollback_rules(u_int32_t, int, char *);
179 static int pf_setup_pfsync_matching(struct pf_ruleset *);
180 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
181 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
182 static int pf_commit_rules(u_int32_t, int, char *);
183 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
184 int);
185 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
186 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
187 struct pf_state *);
188 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
189 struct pf_state *);
190 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
191 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
192 static void pf_expire_states_and_src_nodes(struct pf_rule *);
193 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
194 int, struct pf_rule *);
195 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
196 struct pf_ruleset *);
197 static void pf_delete_rule_by_owner(char *);
198 static int pf_delete_rule_by_ticket(struct pfioc_rule *);
199 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
200 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
201 int, struct pf_rule **);
202
/* -1 asks cdevsw_add() in pfinit() to pick any free major number */
#define PF_CDEV_MAJOR (-1)

/*
 * Character-device switch backing /dev/pf and /dev/pfm.  Only open,
 * close and ioctl are implemented; the remaining entry points use the
 * eno_* / NULL stubs.
 */
static struct cdevsw pf_cdevsw = {
	/* open */ pfopen,
	/* close */ pfclose,
	/* read */ eno_rdwrt,
	/* write */ eno_rdwrt,
	/* ioctl */ pfioctl,
	/* stop */ eno_stop,
	/* reset */ eno_reset,
	/* tty */ NULL,
	/* select */ eno_select,
	/* mmap */ eno_mmap,
	/* strategy */ eno_strat,
	/* getc */ eno_getc,
	/* putc */ eno_putc,
	/* type */ 0
};
221
222 static void pf_attach_hooks(void);
223 #if 0
224 /* currently unused along with pfdetach() */
225 static void pf_detach_hooks(void);
226 #endif
227
/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

#if PF_ALTQ
u_int32_t altq_allowed = 0;	/* set from the "altq" boot-arg in pfinit() */
#endif /* PF_ALTQ */

u_int32_t pf_hash_seed;		/* seeded from random() in pfinit() */

/*
 * These are the pf enabled reference counting variables
 */
/* NOTE(review): pf_enabled_ref_count and pffwrules are maintained by code
 * outside this chunk; their exact semantics are not visible here. */
static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;		/* entries on token_list_head */
static u_int64_t pffwrules;
static u_int32_t pfdevcnt;		/* exclusive-open count for /dev/pfm */

/* list of enable tokens handed out by generate_token() */
SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule pf_default_rule;		/* catch-all rule, set up in pfinit() */
#if PF_ALTQ
/* checked in pf_commit_altq() to decide whether new disciplines are enabled */
static int pf_altq_running;
#endif /* PF_ALTQ */

#define TAGID_MAX 50000			/* highest tag/qid tagname2tag issues */
#if !PF_ALTQ
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
#else /* PF_ALTQ */
/* with ALTQ, queue ids (pf_qids) share the tag machinery with packet tags */
static TAILQ_HEAD(pf_tags, pf_tagname)
    pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
    pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
#endif /* PF_ALTQ */
266
267 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
268 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
269 #endif
270 static u_int16_t tagname2tag(struct pf_tags *, char *);
271 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
272 static void tag_unref(struct pf_tags *, u_int16_t);
273 static int pf_rtlabel_add(struct pf_addr_wrap *);
274 static void pf_rtlabel_remove(struct pf_addr_wrap *);
275 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
276
277 #if INET
278 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
279 struct ip_fw_args *);
280 #endif /* INET */
281 #if INET6
282 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
283 struct ip_fw_args *);
284 #endif /* INET6 */
285
/* debug printf: fires only when pf's debug level is at least n; x is a
 * fully parenthesized printf argument list */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
/* declare "<s>_un", a heap pointer to a union of both struct layouts */
#define PFIOCX_STRUCT_DECL(s)						\
struct {								\
	union {								\
		struct s##_32	_s##_32;				\
		struct s##_64	_s##_64;				\
	} _u;								\
} *s##_un = NULL							\

/* allocate <s>_un and copy caller data "a" in, selecting the layout by the
 * surrounding function's p64 flag; _action runs on allocation failure */
#define PFIOCX_STRUCT_BEGIN(a, s, _action) {				\
	VERIFY(s##_un == NULL);						\
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);	\
	if (s##_un == NULL) {						\
		_action							\
	} else {							\
		if (p64)						\
			bcopy(a, &s##_un->_u._s##_64,			\
			    sizeof (struct s##_64));			\
		else							\
			bcopy(a, &s##_un->_u._s##_32,			\
			    sizeof (struct s##_32));			\
	}								\
}

/* copy results back out to "a" in the caller-appropriate layout and free */
#define PFIOCX_STRUCT_END(s, a) {					\
	VERIFY(s##_un != NULL);						\
	if (p64)							\
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));	\
	else								\
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));	\
	_FREE(s##_un, M_TEMP);						\
	s##_un = NULL;							\
}

/* accessors for the 32-bit / 64-bit views of the union */
#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
/* heap-copy user structure "a" into (v); _action runs on alloc failure */
#define PFIOC_STRUCT_BEGIN(a, v, _action) {				\
	VERIFY((v) == NULL);						\
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);		\
	if ((v) == NULL) {						\
		_action							\
	} else {							\
		bcopy(a, v, sizeof (*(v)));				\
	}								\
}

/* copy (v) back out to "a" and release the heap copy */
#define PFIOC_STRUCT_END(v, a) {					\
	VERIFY((v) != NULL);						\
	bcopy(v, a, sizeof (*(v)));					\
	_FREE(v, M_TEMP);						\
	(v) = NULL;							\
}

#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
349
/* lock group/attribute state backing pf_perim_lock (rw perimeter lock) */
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

/* lock group/attribute state backing pf_lock (main PF mutex) */
static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

/* started in pfinit(); runs pf_purge_thread_fn */
struct thread *pf_purge_thread;
359
360 extern void pfi_kifaddr_update(void *);
361
362 /* pf enable ref-counting helper functions */
363 static u_int64_t generate_token(struct proc *);
364 static int remove_token(struct pfioc_remove_token *);
365 static void invalidate_all_tokens(void);
366
367 static u_int64_t
368 generate_token(struct proc *p)
369 {
370 u_int64_t token_value;
371 struct pfioc_kernel_token *new_token;
372
373 new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
374 M_WAITOK|M_ZERO);
375
376 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
377
378 if (new_token == NULL) {
379 /* malloc failed! bail! */
380 printf("%s: unable to allocate pf token structure!", __func__);
381 return (0);
382 }
383
384 token_value = (u_int64_t)(uintptr_t)new_token;
385
386 new_token->token.token_value = token_value;
387 new_token->token.pid = proc_pid(p);
388 proc_name(new_token->token.pid, new_token->token.proc_name,
389 sizeof (new_token->token.proc_name));
390 new_token->token.timestamp = pf_calendar_time_second();
391
392 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
393 nr_tokens++;
394
395 return (token_value);
396 }
397
398 static int
399 remove_token(struct pfioc_remove_token *tok)
400 {
401 struct pfioc_kernel_token *entry, *tmp;
402
403 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
404
405 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
406 if (tok->token_value == entry->token.token_value) {
407 SLIST_REMOVE(&token_list_head, entry,
408 pfioc_kernel_token, next);
409 _FREE(entry, M_TEMP);
410 nr_tokens--;
411 return (0); /* success */
412 }
413 }
414
415 printf("pf : remove failure\n");
416 return (ESRCH); /* failure */
417 }
418
419 static void
420 invalidate_all_tokens(void)
421 {
422 struct pfioc_kernel_token *entry, *tmp;
423
424 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
425
426 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
427 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
428 _FREE(entry, M_TEMP);
429 }
430
431 nr_tokens = 0;
432 }
433
/*
 * One-time PF initialization: sets up locks, object pools, rulesets,
 * the default rule and timeout table, starts the purge thread, and
 * registers the /dev/pf and /dev/pfm device nodes.
 */
void
pfinit(void)
{
	/* shorthand for the default rule's timeout table, filled in below */
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* set up the PF perimeter reader/writer lock */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	/* set up the main PF mutex */
	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* back-end object pools for rules, states and related structures */
	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#if PF_ALTQ
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
#endif /* PF_ALTQ */
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	/* bring up the table, interface and OS-fingerprint subsystems */
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* on small-memory systems, lower the table-entry high-water mark */
	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	/* empty trees/lists for source tracking, anchors, rules and states */
	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);
#if PF_ALTQ
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	/* ALTQ support is opt-in via the "altq" boot argument */
	PE_parse_boot_argn("altq", &altq_allowed, sizeof (altq_allowed));

	/* ALTQ request codes must line up with their classq counterparts */
	_CASSERT(ALTRQ_PURGE == CLASSQRQ_PURGE);
	_CASSERT(ALTRQ_PURGE_SC == CLASSQRQ_PURGE_SC);
	_CASSERT(ALTRQ_EVENT == CLASSQRQ_EVENT);

	_CASSERT(ALTDQ_REMOVE == CLASSQDQ_REMOVE);
	_CASSERT(ALTDQ_POLL == CLASSQDQ_POLL);
#endif /* PF_ALTQ */

	/* each service-class value must encode its own index */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = random();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: the primary control device, root-only */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	/* /dev/pfm: exclusive-open management device (see pfopen) */
	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
}
571
#if 0
/*
 * Full PF teardown: currently unused (compiled out together with its
 * prototype and pf_detach_hooks above).  Unwinds everything pfinit()
 * and subsequent configuration created.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	/* stop the packet path and wake the purge thread */
	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#if PF_ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* PF_ALTQ */

	/* clear states: mark everything for purge, then sweep */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach them from states, then expire them */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;	/* force immediate expiry */
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
#if PF_ALTQ
	pool_destroy(&pf_altq_pl);
#endif /* PF_ALTQ */
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
652
653 static int
654 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
655 {
656 #pragma unused(flags, fmt, p)
657 if (minor(dev) >= PFDEV_MAX)
658 return (ENXIO);
659
660 if (minor(dev) == PFDEV_PFM) {
661 lck_mtx_lock(pf_lock);
662 if (pfdevcnt != 0) {
663 lck_mtx_unlock(pf_lock);
664 return (EBUSY);
665 }
666 pfdevcnt++;
667 lck_mtx_unlock(pf_lock);
668 }
669 return (0);
670 }
671
672 static int
673 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
674 {
675 #pragma unused(flags, fmt, p)
676 if (minor(dev) >= PFDEV_MAX)
677 return (ENXIO);
678
679 if (minor(dev) == PFDEV_PFM) {
680 lck_mtx_lock(pf_lock);
681 VERIFY(pfdevcnt > 0);
682 pfdevcnt--;
683 lck_mtx_unlock(pf_lock);
684 }
685 return (0);
686 }
687
688 static struct pf_pool *
689 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
690 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
691 u_int8_t check_ticket)
692 {
693 struct pf_ruleset *ruleset;
694 struct pf_rule *rule;
695 int rs_num;
696
697 ruleset = pf_find_ruleset(anchor);
698 if (ruleset == NULL)
699 return (NULL);
700 rs_num = pf_get_ruleset_number(rule_action);
701 if (rs_num >= PF_RULESET_MAX)
702 return (NULL);
703 if (active) {
704 if (check_ticket && ticket !=
705 ruleset->rules[rs_num].active.ticket)
706 return (NULL);
707 if (r_last)
708 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
709 pf_rulequeue);
710 else
711 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
712 } else {
713 if (check_ticket && ticket !=
714 ruleset->rules[rs_num].inactive.ticket)
715 return (NULL);
716 if (r_last)
717 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
718 pf_rulequeue);
719 else
720 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
721 }
722 if (!r_last) {
723 while ((rule != NULL) && (rule->nr != rule_number))
724 rule = TAILQ_NEXT(rule, entries);
725 }
726 if (rule == NULL)
727 return (NULL);
728
729 return (&rule->rpool);
730 }
731
732 static void
733 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
734 {
735 struct pf_pooladdr *mv_pool_pa;
736
737 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
738 TAILQ_REMOVE(poola, mv_pool_pa, entries);
739 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
740 }
741 }
742
743 static void
744 pf_empty_pool(struct pf_palist *poola)
745 {
746 struct pf_pooladdr *empty_pool_pa;
747
748 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
749 pfi_dynaddr_remove(&empty_pool_pa->addr);
750 pf_tbladdr_remove(&empty_pool_pa->addr);
751 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
752 TAILQ_REMOVE(poola, empty_pool_pa, entries);
753 pool_put(&pf_pooladdr_pl, empty_pool_pa);
754 }
755 }
756
/*
 * Unlink rule from rulequeue (if non-NULL) and destroy it once nothing
 * references it.  A rule still holding states or source nodes is only
 * unlinked here; final teardown happens on a later call once the last
 * reference is gone (then rulequeue == NULL and tqe_prev == NULL).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* a NULL tqe_prev marks the rule as no longer queued */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer destruction while states/src nodes still reference the
	 * rule, or while it remains linked on a queue */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#if PF_ALTQ
	if (altq_allowed) {
		if (rule->pqid != rule->qid)
			pf_qid_unref(rule->pqid);
		pf_qid_unref(rule->qid);
	}
#endif /* PF_ALTQ */
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not detached on the unlink pass; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
804
/*
 * Look up or allocate the numeric tag for tagname in head.  Existing
 * entries get their reference count bumped; otherwise the lowest free
 * tag id is allocated.  Returns the tag (>= 1), or 0 if allocation
 * fails or the TAGID_MAX namespace is exhausted.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	/* existing entry? just take another reference */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* the list is kept sorted by tag (see the insert below);
		 * advance while ids are consecutive, stopping at a gap */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
847
848 static void
849 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
850 {
851 struct pf_tagname *tag;
852
853 TAILQ_FOREACH(tag, head, entries)
854 if (tag->tag == tagid) {
855 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
856 return;
857 }
858 }
859
860 static void
861 tag_unref(struct pf_tags *head, u_int16_t tag)
862 {
863 struct pf_tagname *p, *next;
864
865 if (tag == 0)
866 return;
867
868 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
869 next = TAILQ_NEXT(p, entries);
870 if (tag == p->tag) {
871 if (--p->ref == 0) {
872 TAILQ_REMOVE(head, p, entries);
873 _FREE(p, M_TEMP);
874 }
875 break;
876 }
877 }
878 }
879
/* public wrapper: resolve/allocate a tag in the global pf_tags namespace */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
885
/* public wrapper: copy tagid's name out of the global pf_tags namespace */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
891
892 void
893 pf_tag_ref(u_int16_t tag)
894 {
895 struct pf_tagname *t;
896
897 TAILQ_FOREACH(t, &pf_tags, entries)
898 if (t->tag == tag)
899 break;
900 if (t != NULL)
901 t->ref++;
902 }
903
/* public wrapper: drop one reference on tag in the global namespace */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
909
/* route labels are not supported on this platform; accept and do nothing */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}
916
/* route labels are not supported on this platform; no-op */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
922
/* route labels are not supported on this platform; no-op */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
928
929 #if PF_ALTQ
/*
 * Map an ALTQ queue name to its numeric qid, allocating one if needed
 * (shares the tagname2tag machinery via the pf_qids namespace).
 * Caller must hold pf_lock.
 */
u_int32_t
pf_qname2qid(char *qname)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
937
/* copy the name of qid into p; unknown qids leave p untouched.
 * Caller must hold pf_lock. */
void
pf_qid2qname(u_int32_t qid, char *p)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}
945
/* drop one reference on qid in the ALTQ qid namespace.
 * Caller must hold pf_lock. */
void
pf_qid_unref(u_int32_t qid)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag_unref(&pf_qids, (u_int16_t)qid);
}
953
/*
 * Open an ALTQ transaction: purge whatever remains on the inactive
 * list, then hand back a fresh ticket identifying the transaction.
 * Caller must hold pf_lock.
 */
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	/* NOTE(review): 'error' holds only the most recent altq_remove()
	 * result, so an earlier failure can be masked by a later success */
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
978
/*
 * Abort the ALTQ transaction identified by ticket: drop everything
 * staged on the inactive list and close the transaction.  A stale or
 * unopened ticket is not an error.  Caller must hold pf_lock.
 */
static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq *altq;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	/* NOTE(review): only the last altq_remove() failure is reported */
	return (error);
}
1002
/*
 * Commit the open ALTQ transaction: swap the active and inactive lists,
 * attach/enable the newly active disciplines, then purge the now-old
 * list.  Caller must hold pf_lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs;
	struct pf_altq *altq;
	int err, error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == '\0') {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				/*
				 * NOTE(review): this early return leaves
				 * altqs_inactive_open set and the old list
				 * unpurged until the next begin/rollback.
				 */
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}

	altqs_inactive_open = 0;
	return (error);
}
1055
1056 static int
1057 pf_enable_altq(struct pf_altq *altq)
1058 {
1059 struct ifnet *ifp;
1060 struct ifclassq *ifq;
1061 int error = 0;
1062
1063 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1064
1065 if ((ifp = ifunit(altq->ifname)) == NULL)
1066 return (EINVAL);
1067
1068 ifq = &ifp->if_snd;
1069 IFCQ_LOCK(ifq);
1070 if (IFCQ_ALTQ(ifq)->altq_type != ALTQT_NONE)
1071 error = altq_enable(IFCQ_ALTQ(ifq));
1072
1073 /* set or clear tokenbucket regulator */
1074 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
1075 struct tb_profile tb = { 0, 0, 0 };
1076
1077 if (altq->aflags & PF_ALTQF_TBR) {
1078 if (altq->bwtype != PF_ALTQ_BW_ABSOLUTE &&
1079 altq->bwtype != PF_ALTQ_BW_PERCENT) {
1080 error = EINVAL;
1081 } else {
1082 if (altq->bwtype == PF_ALTQ_BW_ABSOLUTE)
1083 tb.rate = altq->ifbandwidth;
1084 else
1085 tb.percent = altq->ifbandwidth;
1086 tb.depth = altq->tbrsize;
1087 error = ifclassq_tbr_set(ifq, &tb, TRUE);
1088 }
1089 } else if (IFCQ_TBR_IS_ENABLED(ifq)) {
1090 error = ifclassq_tbr_set(ifq, &tb, TRUE);
1091 }
1092 }
1093 IFCQ_UNLOCK(ifq);
1094
1095 return (error);
1096 }
1097
/*
 * Disable the ALTQ discipline referenced by `altq' on its interface's
 * send queue and clear any active token-bucket regulator.  If the
 * queue's discipline pointer no longer matches this altq's, a newer
 * configuration has superseded it and nothing needs to be done.
 * Caller must hold pf_lock.  Returns 0 or an errno value.
 */
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	int error;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	ifq = &ifp->if_snd;
	IFCQ_LOCK(ifq);
	if (altq->altq_disc != IFCQ_ALTQ(ifq)->altq_disc) {
		IFCQ_UNLOCK(ifq);
		return (0);
	}

	error = altq_disable(IFCQ_ALTQ(ifq));

	if (error == 0 && IFCQ_TBR_IS_ENABLED(ifq)) {
		/* clear tokenbucket regulator */
		struct tb_profile tb = { 0, 0, 0 };
		error = ifclassq_tbr_set(ifq, &tb, TRUE);
	}
	IFCQ_UNLOCK(ifq);

	return (error);
}
1132
1133 static void
1134 pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
1135 {
1136 bcopy(src, dst, sizeof (struct pf_altq));
1137
1138 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1139 dst->qname[sizeof (dst->qname) - 1] = '\0';
1140 dst->parent[sizeof (dst->parent) - 1] = '\0';
1141 dst->altq_disc = NULL;
1142 dst->entries.tqe_next = NULL;
1143 dst->entries.tqe_prev = NULL;
1144 }
1145
1146 static void
1147 pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
1148 {
1149 struct pf_altq pa;
1150
1151 bcopy(src, &pa, sizeof (struct pf_altq));
1152 pa.altq_disc = NULL;
1153 pa.entries.tqe_next = NULL;
1154 pa.entries.tqe_prev = NULL;
1155 bcopy(&pa, dst, sizeof (struct pf_altq));
1156 }
1157 #endif /* PF_ALTQ */
1158
1159 static int
1160 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1161 {
1162 struct pf_ruleset *rs;
1163 struct pf_rule *rule;
1164
1165 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1166 return (EINVAL);
1167 rs = pf_find_or_create_ruleset(anchor);
1168 if (rs == NULL)
1169 return (EINVAL);
1170 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1171 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1172 rs->rules[rs_num].inactive.rcount--;
1173 }
1174 *ticket = ++rs->rules[rs_num].inactive.ticket;
1175 rs->rules[rs_num].inactive.open = 1;
1176 return (0);
1177 }
1178
1179 static int
1180 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1181 {
1182 struct pf_ruleset *rs;
1183 struct pf_rule *rule;
1184
1185 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1186 return (EINVAL);
1187 rs = pf_find_ruleset(anchor);
1188 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1189 rs->rules[rs_num].inactive.ticket != ticket)
1190 return (0);
1191 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1192 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1193 rs->rules[rs_num].inactive.rcount--;
1194 }
1195 rs->rules[rs_num].inactive.open = 0;
1196 return (0);
1197 }
1198
/*
 * Helpers for folding rule fields into an MD5 context; `ctx' must be
 * in scope at the call site.  The HTONL/HTONS variants hash the value
 * in network byte order through the caller-provided scratch variable
 * `stor', so the resulting digest does not depend on host byte order.
 */
#define PF_MD5_UPD(st, elm) \
MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
(stor) = htonl((st)->elm); \
MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
(stor) = htons((st)->elm); \
MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
1214
/*
 * Fold a rule address into the MD5 context used for the ruleset
 * checksum (see pf_setup_pfsync_matching()).  Only the fields relevant
 * to the address type are hashed; port range/op are included only for
 * TCP and UDP.  The update order defines the digest, so it must not be
 * reordered.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1251
/*
 * Fold all checksum-relevant fields of a rule into the MD5 context.
 * Multi-byte integers go through the x/y scratch variables so they are
 * hashed in network byte order (host-byte-order independent digest).
 * The field order defines the digest and must not be changed.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1290
/*
 * Commit an open ruleset transaction: verify the ticket, recompute the
 * pfsync checksum when committing the main ruleset, swap the active
 * and inactive rule lists, recompute skip steps, then free the old
 * (now inactive) rules.  Caller must hold pf_lock.  Returns EINVAL on
 * a bad ruleset number, EBUSY on ticket mismatch, or an error from
 * pf_setup_pfsync_matching().
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	/* the committed ticket becomes the active one */
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
1347
/*
 * Copy a rule in from user space and sanitize it: NUL-terminate all
 * user-supplied strings, record the calling process's uid/pid as the
 * rule creator, clear kernel-only pointers and reference counters, and
 * tag rules created through the PFDEV_PFM minor device with PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	/* user strings may arrive unterminated */
	dst->label[sizeof (dst->label) - 1] = '\0';
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';

	/* remember who created this rule */
	dst->cuid = kauth_cred_getuid(p->p_ucred);
	dst->cpid = p->p_pid;

	/* kernel pointers are resolved later, never taken from user space */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	/* rules added via the PFM device are marked as such */
	if ((uint8_t)minordev == PFDEV_PFM)
		dst->rule_flag |= PFRULE_PFM;
}
1381
1382 static void
1383 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1384 {
1385 bcopy(src, dst, sizeof (struct pf_rule));
1386
1387 dst->anchor = NULL;
1388 dst->kif = NULL;
1389 dst->overload_tbl = NULL;
1390
1391 TAILQ_INIT(&dst->rpool.list);
1392 dst->rpool.cur = NULL;
1393
1394 dst->entries.tqe_prev = NULL;
1395 dst->entries.tqe_next = NULL;
1396 }
1397
/*
 * Export a kernel state entry (state key `sk' plus state `s') into the
 * pfsync wire/user representation `sp'.  Rule pointers are flattened to
 * rule numbers ((unsigned)-1 when absent), `creation' becomes an age in
 * seconds, and `expire' becomes seconds remaining (0 if already past).
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext.addr = sk->ext.addr;
	sp->ext.xport = sk->ext.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* flatten rule pointers to rule numbers; -1 means "none" */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	/* convert absolute expiry to seconds remaining */
	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}
1453
/*
 * Import a pfsync wire/user state `sp' into a kernel state key `sk'
 * and state `s' -- the inverse of pf_state_export().  The flowhash is
 * recomputed locally, rule pointers default to the default rule, and
 * counters are zeroed rather than restored.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext.addr = sp->ext.addr;
	sk->ext.xport = sp->ext.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;
	/* flowhash is host-local; never taken from the wire */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0)
		/*
		 * Rebase the imported remaining lifetime against the
		 * default-rule timeout for this state's timeout class.
		 */
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1490
1491 static void
1492 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1493 {
1494 bcopy(src, dst, sizeof (struct pf_pooladdr));
1495
1496 dst->entries.tqe_prev = NULL;
1497 dst->entries.tqe_next = NULL;
1498 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1499 dst->kif = NULL;
1500 }
1501
1502 static void
1503 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1504 {
1505 bcopy(src, dst, sizeof (struct pf_pooladdr));
1506
1507 dst->entries.tqe_prev = NULL;
1508 dst->entries.tqe_next = NULL;
1509 dst->kif = NULL;
1510 }
1511
/*
 * Build the per-ruleset rule pointer arrays (indexed by rule number)
 * for the inactive rulesets of `rs' and compute an MD5 digest over all
 * their rules; the digest is stored in pf_status.pf_chksum.  Scrub
 * rulesets are excluded.  Returns ENOMEM if a pointer array cannot be
 * allocated, otherwise 0.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* replace any stale pointer array from a prior transaction */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		/* hash every rule and index it by its rule number */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
1551
/*
 * Mark pf enabled and running, stamp the start time, lazily seed the
 * state-id generator (high 32 bits from the current time, first start
 * only), and wake the purge thread.  Caller must hold pf_lock and pf
 * must currently be disabled.
 */
static void
pf_start(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed once: time in the upper 32 bits of the state id */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1569
/*
 * Mark pf disabled and not running, stamp the stop time, and wake the
 * purge thread.  Caller must hold pf_lock and pf must currently be
 * enabled.
 */
static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1583
1584 static int
1585 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1586 {
1587 #pragma unused(dev)
1588 int p64 = proc_is64bit(p);
1589 int error = 0;
1590 int minordev = minor(dev);
1591
1592 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1593 return (EPERM);
1594
1595 /* XXX keep in sync with switch() below */
1596 if (securelevel > 1)
1597 switch (cmd) {
1598 case DIOCGETRULES:
1599 case DIOCGETRULE:
1600 case DIOCGETADDRS:
1601 case DIOCGETADDR:
1602 case DIOCGETSTATE:
1603 case DIOCSETSTATUSIF:
1604 case DIOCGETSTATUS:
1605 case DIOCCLRSTATUS:
1606 case DIOCNATLOOK:
1607 case DIOCSETDEBUG:
1608 case DIOCGETSTATES:
1609 case DIOCINSERTRULE:
1610 case DIOCDELETERULE:
1611 case DIOCGETTIMEOUT:
1612 case DIOCCLRRULECTRS:
1613 case DIOCGETLIMIT:
1614 case DIOCGETALTQS:
1615 case DIOCGETALTQ:
1616 case DIOCGETQSTATS:
1617 case DIOCGETRULESETS:
1618 case DIOCGETRULESET:
1619 case DIOCRGETTABLES:
1620 case DIOCRGETTSTATS:
1621 case DIOCRCLRTSTATS:
1622 case DIOCRCLRADDRS:
1623 case DIOCRADDADDRS:
1624 case DIOCRDELADDRS:
1625 case DIOCRSETADDRS:
1626 case DIOCRGETADDRS:
1627 case DIOCRGETASTATS:
1628 case DIOCRCLRASTATS:
1629 case DIOCRTSTADDRS:
1630 case DIOCOSFPGET:
1631 case DIOCGETSRCNODES:
1632 case DIOCCLRSRCNODES:
1633 case DIOCIGETIFACES:
1634 case DIOCGIFSPEED:
1635 case DIOCSETIFFLAG:
1636 case DIOCCLRIFFLAG:
1637 break;
1638 case DIOCRCLRTABLES:
1639 case DIOCRADDTABLES:
1640 case DIOCRDELTABLES:
1641 case DIOCRSETTFLAGS: {
1642 int pfrio_flags;
1643
1644 bcopy(&((struct pfioc_table *)(void *)addr)->
1645 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1646
1647 if (pfrio_flags & PFR_FLAG_DUMMY)
1648 break; /* dummy operation ok */
1649 return (EPERM);
1650 }
1651 default:
1652 return (EPERM);
1653 }
1654
1655 if (!(flags & FWRITE))
1656 switch (cmd) {
1657 case DIOCSTART:
1658 case DIOCSTARTREF:
1659 case DIOCSTOP:
1660 case DIOCSTOPREF:
1661 case DIOCGETSTARTERS:
1662 case DIOCGETRULES:
1663 case DIOCGETADDRS:
1664 case DIOCGETADDR:
1665 case DIOCGETSTATE:
1666 case DIOCGETSTATUS:
1667 case DIOCGETSTATES:
1668 case DIOCINSERTRULE:
1669 case DIOCDELETERULE:
1670 case DIOCGETTIMEOUT:
1671 case DIOCGETLIMIT:
1672 case DIOCGETALTQS:
1673 case DIOCGETALTQ:
1674 case DIOCGETQSTATS:
1675 case DIOCGETRULESETS:
1676 case DIOCGETRULESET:
1677 case DIOCNATLOOK:
1678 case DIOCRGETTABLES:
1679 case DIOCRGETTSTATS:
1680 case DIOCRGETADDRS:
1681 case DIOCRGETASTATS:
1682 case DIOCRTSTADDRS:
1683 case DIOCOSFPGET:
1684 case DIOCGETSRCNODES:
1685 case DIOCIGETIFACES:
1686 case DIOCGIFSPEED:
1687 break;
1688 case DIOCRCLRTABLES:
1689 case DIOCRADDTABLES:
1690 case DIOCRDELTABLES:
1691 case DIOCRCLRTSTATS:
1692 case DIOCRCLRADDRS:
1693 case DIOCRADDADDRS:
1694 case DIOCRDELADDRS:
1695 case DIOCRSETADDRS:
1696 case DIOCRSETTFLAGS: {
1697 int pfrio_flags;
1698
1699 bcopy(&((struct pfioc_table *)(void *)addr)->
1700 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1701
1702 if (pfrio_flags & PFR_FLAG_DUMMY) {
1703 flags |= FWRITE; /* need write lock for dummy */
1704 break; /* dummy operation ok */
1705 }
1706 return (EACCES);
1707 }
1708 case DIOCGETRULE: {
1709 u_int32_t action;
1710
1711 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1712 &action, sizeof (action));
1713
1714 if (action == PF_GET_CLR_CNTR)
1715 return (EACCES);
1716 break;
1717 }
1718 default:
1719 return (EACCES);
1720 }
1721
1722 #if PF_ALTQ
1723 switch (cmd) {
1724 case DIOCSTARTALTQ:
1725 case DIOCSTOPALTQ:
1726 case DIOCADDALTQ:
1727 case DIOCGETALTQS:
1728 case DIOCGETALTQ:
1729 case DIOCCHANGEALTQ:
1730 case DIOCGETQSTATS:
1731 /* fail if ALTQ is disabled */
1732 if (!altq_allowed)
1733 return (ENODEV);
1734 break;
1735 }
1736 #endif /* PF_ALTQ */
1737
1738 if (flags & FWRITE)
1739 lck_rw_lock_exclusive(pf_perim_lock);
1740 else
1741 lck_rw_lock_shared(pf_perim_lock);
1742
1743 lck_mtx_lock(pf_lock);
1744
1745 switch (cmd) {
1746
1747 case DIOCSTART:
1748 if (pf_status.running) {
1749 /*
1750 * Increment the reference for a simple -e enable, so
1751 * that even if other processes drop their references,
1752 * pf will still be available to processes that turned
1753 * it on without taking a reference
1754 */
1755 if (nr_tokens == pf_enabled_ref_count) {
1756 pf_enabled_ref_count++;
1757 VERIFY(pf_enabled_ref_count != 0);
1758 }
1759 error = EEXIST;
1760 } else if (pf_purge_thread == NULL) {
1761 error = ENOMEM;
1762 } else {
1763 pf_start();
1764 pf_enabled_ref_count++;
1765 VERIFY(pf_enabled_ref_count != 0);
1766 }
1767 break;
1768
1769 case DIOCSTARTREF: /* u_int64_t */
1770 if (pf_purge_thread == NULL) {
1771 error = ENOMEM;
1772 } else {
1773 u_int64_t token;
1774
1775 /* small enough to be on stack */
1776 if ((token = generate_token(p)) != 0) {
1777 if (pf_is_enabled == 0) {
1778 pf_start();
1779 }
1780 pf_enabled_ref_count++;
1781 VERIFY(pf_enabled_ref_count != 0);
1782 } else {
1783 error = ENOMEM;
1784 DPFPRINTF(PF_DEBUG_URGENT,
1785 ("pf: unable to generate token\n"));
1786 }
1787 bcopy(&token, addr, sizeof (token));
1788 }
1789 break;
1790
1791 case DIOCSTOP:
1792 if (!pf_status.running) {
1793 error = ENOENT;
1794 } else {
1795 pf_stop();
1796 pf_enabled_ref_count = 0;
1797 invalidate_all_tokens();
1798 }
1799 break;
1800
1801 case DIOCSTOPREF: /* struct pfioc_remove_token */
1802 if (!pf_status.running) {
1803 error = ENOENT;
1804 } else {
1805 struct pfioc_remove_token pfrt;
1806
1807 /* small enough to be on stack */
1808 bcopy(addr, &pfrt, sizeof (pfrt));
1809 if ((error = remove_token(&pfrt)) == 0) {
1810 VERIFY(pf_enabled_ref_count != 0);
1811 pf_enabled_ref_count--;
1812 /* return currently held references */
1813 pfrt.refcount = pf_enabled_ref_count;
1814 DPFPRINTF(PF_DEBUG_MISC,
1815 ("pf: enabled refcount decremented\n"));
1816 } else {
1817 error = EINVAL;
1818 DPFPRINTF(PF_DEBUG_URGENT,
1819 ("pf: token mismatch\n"));
1820 }
1821 bcopy(&pfrt, addr, sizeof (pfrt));
1822
1823 if (error == 0 && pf_enabled_ref_count == 0)
1824 pf_stop();
1825 }
1826 break;
1827
1828 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1829 PFIOCX_STRUCT_DECL(pfioc_tokens);
1830
1831 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
1832 error = pfioctl_ioc_tokens(cmd,
1833 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1834 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1835 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1836 break;
1837 }
1838
1839 case DIOCADDRULE: /* struct pfioc_rule */
1840 case DIOCGETRULES: /* struct pfioc_rule */
1841 case DIOCGETRULE: /* struct pfioc_rule */
1842 case DIOCCHANGERULE: /* struct pfioc_rule */
1843 case DIOCINSERTRULE: /* struct pfioc_rule */
1844 case DIOCDELETERULE: { /* struct pfioc_rule */
1845 struct pfioc_rule *pr = NULL;
1846
1847 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
1848 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1849 PFIOC_STRUCT_END(pr, addr);
1850 break;
1851 }
1852
1853 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1854 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1855 struct pfioc_state_kill *psk = NULL;
1856
1857 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
1858 error = pfioctl_ioc_state_kill(cmd, psk, p);
1859 PFIOC_STRUCT_END(psk, addr);
1860 break;
1861 }
1862
1863 case DIOCADDSTATE: /* struct pfioc_state */
1864 case DIOCGETSTATE: { /* struct pfioc_state */
1865 struct pfioc_state *ps = NULL;
1866
1867 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
1868 error = pfioctl_ioc_state(cmd, ps, p);
1869 PFIOC_STRUCT_END(ps, addr);
1870 break;
1871 }
1872
1873 case DIOCGETSTATES: { /* struct pfioc_states */
1874 PFIOCX_STRUCT_DECL(pfioc_states);
1875
1876 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
1877 error = pfioctl_ioc_states(cmd,
1878 PFIOCX_STRUCT_ADDR32(pfioc_states),
1879 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1880 PFIOCX_STRUCT_END(pfioc_states, addr);
1881 break;
1882 }
1883
1884 case DIOCGETSTATUS: { /* struct pf_status */
1885 struct pf_status *s = NULL;
1886
1887 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
1888 pfi_update_status(s->ifname, s);
1889 PFIOC_STRUCT_END(s, addr);
1890 break;
1891 }
1892
1893 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1894 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1895
1896 /* OK for unaligned accesses */
1897 if (pi->ifname[0] == 0) {
1898 bzero(pf_status.ifname, IFNAMSIZ);
1899 break;
1900 }
1901 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1902 break;
1903 }
1904
1905 case DIOCCLRSTATUS: {
1906 bzero(pf_status.counters, sizeof (pf_status.counters));
1907 bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
1908 bzero(pf_status.scounters, sizeof (pf_status.scounters));
1909 pf_status.since = pf_calendar_time_second();
1910 if (*pf_status.ifname)
1911 pfi_update_status(pf_status.ifname, NULL);
1912 break;
1913 }
1914
1915 case DIOCNATLOOK: { /* struct pfioc_natlook */
1916 struct pfioc_natlook *pnl = NULL;
1917
1918 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
1919 error = pfioctl_ioc_natlook(cmd, pnl, p);
1920 PFIOC_STRUCT_END(pnl, addr);
1921 break;
1922 }
1923
1924 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1925 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1926 struct pfioc_tm pt;
1927
1928 /* small enough to be on stack */
1929 bcopy(addr, &pt, sizeof (pt));
1930 error = pfioctl_ioc_tm(cmd, &pt, p);
1931 bcopy(&pt, addr, sizeof (pt));
1932 break;
1933 }
1934
1935 case DIOCGETLIMIT: /* struct pfioc_limit */
1936 case DIOCSETLIMIT: { /* struct pfioc_limit */
1937 struct pfioc_limit pl;
1938
1939 /* small enough to be on stack */
1940 bcopy(addr, &pl, sizeof (pl));
1941 error = pfioctl_ioc_limit(cmd, &pl, p);
1942 bcopy(&pl, addr, sizeof (pl));
1943 break;
1944 }
1945
1946 case DIOCSETDEBUG: { /* u_int32_t */
1947 bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
1948 break;
1949 }
1950
1951 case DIOCCLRRULECTRS: {
1952 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1953 struct pf_ruleset *ruleset = &pf_main_ruleset;
1954 struct pf_rule *rule;
1955
1956 TAILQ_FOREACH(rule,
1957 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1958 rule->evaluations = 0;
1959 rule->packets[0] = rule->packets[1] = 0;
1960 rule->bytes[0] = rule->bytes[1] = 0;
1961 }
1962 break;
1963 }
1964
1965 case DIOCGIFSPEED: {
1966 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1967 struct pf_ifspeed ps;
1968 struct ifnet *ifp;
1969 u_int64_t baudrate;
1970
1971 if (psp->ifname[0] != '\0') {
1972 /* Can we completely trust user-land? */
1973 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1974 ps.ifname[IFNAMSIZ - 1] = '\0';
1975 ifp = ifunit(ps.ifname);
1976 if (ifp != NULL) {
1977 baudrate = ifp->if_output_bw.max_bw;
1978 bcopy(&baudrate, &psp->baudrate,
1979 sizeof (baudrate));
1980 } else {
1981 error = EINVAL;
1982 }
1983 } else {
1984 error = EINVAL;
1985 }
1986 break;
1987 }
1988
1989 #if PF_ALTQ
1990 case DIOCSTARTALTQ: {
1991 struct pf_altq *altq;
1992
1993 VERIFY(altq_allowed);
1994 /* enable all altq interfaces on active list */
1995 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1996 if (altq->qname[0] == '\0') {
1997 error = pf_enable_altq(altq);
1998 if (error != 0)
1999 break;
2000 }
2001 }
2002 if (error == 0)
2003 pf_altq_running = 1;
2004 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2005 break;
2006 }
2007
2008 case DIOCSTOPALTQ: {
2009 struct pf_altq *altq;
2010
2011 VERIFY(altq_allowed);
2012 /* disable all altq interfaces on active list */
2013 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2014 if (altq->qname[0] == '\0') {
2015 error = pf_disable_altq(altq);
2016 if (error != 0)
2017 break;
2018 }
2019 }
2020 if (error == 0)
2021 pf_altq_running = 0;
2022 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2023 break;
2024 }
2025
2026 case DIOCADDALTQ: { /* struct pfioc_altq */
2027 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2028 struct pf_altq *altq, *a;
2029 u_int32_t ticket;
2030
2031 VERIFY(altq_allowed);
2032 bcopy(&pa->ticket, &ticket, sizeof (ticket));
2033 if (ticket != ticket_altqs_inactive) {
2034 error = EBUSY;
2035 break;
2036 }
2037 altq = pool_get(&pf_altq_pl, PR_WAITOK);
2038 if (altq == NULL) {
2039 error = ENOMEM;
2040 break;
2041 }
2042 pf_altq_copyin(&pa->altq, altq);
2043
2044 /*
2045 * if this is for a queue, find the discipline and
2046 * copy the necessary fields
2047 */
2048 if (altq->qname[0] != '\0') {
2049 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2050 error = EBUSY;
2051 pool_put(&pf_altq_pl, altq);
2052 break;
2053 }
2054 altq->altq_disc = NULL;
2055 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2056 if (strncmp(a->ifname, altq->ifname,
2057 IFNAMSIZ) == 0 && a->qname[0] == '\0') {
2058 altq->altq_disc = a->altq_disc;
2059 break;
2060 }
2061 }
2062 }
2063
2064 error = altq_add(altq);
2065 if (error) {
2066 pool_put(&pf_altq_pl, altq);
2067 break;
2068 }
2069
2070 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2071 pf_altq_copyout(altq, &pa->altq);
2072 break;
2073 }
2074
2075 case DIOCGETALTQS: {
2076 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2077 struct pf_altq *altq;
2078 u_int32_t nr;
2079
2080 VERIFY(altq_allowed);
2081 nr = 0;
2082 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2083 nr++;
2084 bcopy(&nr, &pa->nr, sizeof (nr));
2085 bcopy(&ticket_altqs_active, &pa->ticket, sizeof (pa->ticket));
2086 break;
2087 }
2088
2089 case DIOCGETALTQ: {
2090 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2091 struct pf_altq *altq;
2092 u_int32_t nr, pa_nr, ticket;
2093
2094 VERIFY(altq_allowed);
2095 bcopy(&pa->ticket, &ticket, sizeof (ticket));
2096 if (ticket != ticket_altqs_active) {
2097 error = EBUSY;
2098 break;
2099 }
2100 bcopy(&pa->nr, &pa_nr, sizeof (pa_nr));
2101 nr = 0;
2102 altq = TAILQ_FIRST(pf_altqs_active);
2103 while ((altq != NULL) && (nr < pa_nr)) {
2104 altq = TAILQ_NEXT(altq, entries);
2105 nr++;
2106 }
2107 if (altq == NULL) {
2108 error = EBUSY;
2109 break;
2110 }
2111 pf_altq_copyout(altq, &pa->altq);
2112 break;
2113 }
2114
2115 case DIOCCHANGEALTQ:
2116 VERIFY(altq_allowed);
2117 /* CHANGEALTQ not supported yet! */
2118 error = ENODEV;
2119 break;
2120
2121 case DIOCGETQSTATS: {
2122 struct pfioc_qstats *pq = (struct pfioc_qstats *)(void *)addr;
2123 struct pf_altq *altq;
2124 u_int32_t nr, pq_nr, ticket;
2125 int nbytes;
2126
2127 VERIFY(altq_allowed);
2128 bcopy(&pq->ticket, &ticket, sizeof (ticket));
2129 if (ticket != ticket_altqs_active) {
2130 error = EBUSY;
2131 break;
2132 }
2133 bcopy(&pq->nr, &pq_nr, sizeof (pq_nr));
2134 nr = 0;
2135 altq = TAILQ_FIRST(pf_altqs_active);
2136 while ((altq != NULL) && (nr < pq_nr)) {
2137 altq = TAILQ_NEXT(altq, entries);
2138 nr++;
2139 }
2140 if (altq == NULL) {
2141 error = EBUSY;
2142 break;
2143 }
2144 bcopy(&pq->nbytes, &nbytes, sizeof (nbytes));
2145 error = altq_getqstats(altq, pq->buf, &nbytes);
2146 if (error == 0) {
2147 pq->scheduler = altq->scheduler;
2148 bcopy(&nbytes, &pq->nbytes, sizeof (nbytes));
2149 }
2150 break;
2151 }
2152 #endif /* PF_ALTQ */
2153
2154 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
2155 case DIOCADDADDR: /* struct pfioc_pooladdr */
2156 case DIOCGETADDRS: /* struct pfioc_pooladdr */
2157 case DIOCGETADDR: /* struct pfioc_pooladdr */
2158 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
2159 struct pfioc_pooladdr *pp = NULL;
2160
2161 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
2162 error = pfioctl_ioc_pooladdr(cmd, pp, p);
2163 PFIOC_STRUCT_END(pp, addr);
2164 break;
2165 }
2166
2167 case DIOCGETRULESETS: /* struct pfioc_ruleset */
2168 case DIOCGETRULESET: { /* struct pfioc_ruleset */
2169 struct pfioc_ruleset *pr = NULL;
2170
2171 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
2172 error = pfioctl_ioc_ruleset(cmd, pr, p);
2173 PFIOC_STRUCT_END(pr, addr);
2174 break;
2175 }
2176
2177 case DIOCRCLRTABLES: /* struct pfioc_table */
2178 case DIOCRADDTABLES: /* struct pfioc_table */
2179 case DIOCRDELTABLES: /* struct pfioc_table */
2180 case DIOCRGETTABLES: /* struct pfioc_table */
2181 case DIOCRGETTSTATS: /* struct pfioc_table */
2182 case DIOCRCLRTSTATS: /* struct pfioc_table */
2183 case DIOCRSETTFLAGS: /* struct pfioc_table */
2184 case DIOCRCLRADDRS: /* struct pfioc_table */
2185 case DIOCRADDADDRS: /* struct pfioc_table */
2186 case DIOCRDELADDRS: /* struct pfioc_table */
2187 case DIOCRSETADDRS: /* struct pfioc_table */
2188 case DIOCRGETADDRS: /* struct pfioc_table */
2189 case DIOCRGETASTATS: /* struct pfioc_table */
2190 case DIOCRCLRASTATS: /* struct pfioc_table */
2191 case DIOCRTSTADDRS: /* struct pfioc_table */
2192 case DIOCRINADEFINE: { /* struct pfioc_table */
2193 PFIOCX_STRUCT_DECL(pfioc_table);
2194
2195 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
2196 error = pfioctl_ioc_table(cmd,
2197 PFIOCX_STRUCT_ADDR32(pfioc_table),
2198 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
2199 PFIOCX_STRUCT_END(pfioc_table, addr);
2200 break;
2201 }
2202
2203 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
2204 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
2205 struct pf_osfp_ioctl *io = NULL;
2206
2207 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
2208 if (cmd == DIOCOSFPADD) {
2209 error = pf_osfp_add(io);
2210 } else {
2211 VERIFY(cmd == DIOCOSFPGET);
2212 error = pf_osfp_get(io);
2213 }
2214 PFIOC_STRUCT_END(io, addr);
2215 break;
2216 }
2217
2218 case DIOCXBEGIN: /* struct pfioc_trans */
2219 case DIOCXROLLBACK: /* struct pfioc_trans */
2220 case DIOCXCOMMIT: { /* struct pfioc_trans */
2221 PFIOCX_STRUCT_DECL(pfioc_trans);
2222
2223 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
2224 error = pfioctl_ioc_trans(cmd,
2225 PFIOCX_STRUCT_ADDR32(pfioc_trans),
2226 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
2227 PFIOCX_STRUCT_END(pfioc_trans, addr);
2228 break;
2229 }
2230
2231 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
2232 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
2233
2234 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
2235 error = ENOMEM; break;);
2236 error = pfioctl_ioc_src_nodes(cmd,
2237 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
2238 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
2239 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
2240 break;
2241 }
2242
2243 case DIOCCLRSRCNODES: {
2244 struct pf_src_node *n;
2245 struct pf_state *state;
2246
2247 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2248 state->src_node = NULL;
2249 state->nat_src_node = NULL;
2250 }
2251 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2252 n->expire = 1;
2253 n->states = 0;
2254 }
2255 pf_purge_expired_src_nodes();
2256 pf_status.src_nodes = 0;
2257 break;
2258 }
2259
2260 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
2261 struct pfioc_src_node_kill *psnk = NULL;
2262
2263 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
2264 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
2265 PFIOC_STRUCT_END(psnk, addr);
2266 break;
2267 }
2268
2269 case DIOCSETHOSTID: { /* u_int32_t */
2270 u_int32_t hid;
2271
2272 /* small enough to be on stack */
2273 bcopy(addr, &hid, sizeof (hid));
2274 if (hid == 0)
2275 pf_status.hostid = random();
2276 else
2277 pf_status.hostid = hid;
2278 break;
2279 }
2280
2281 case DIOCOSFPFLUSH:
2282 pf_osfp_flush();
2283 break;
2284
2285 case DIOCIGETIFACES: /* struct pfioc_iface */
2286 case DIOCSETIFFLAG: /* struct pfioc_iface */
2287 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
2288 PFIOCX_STRUCT_DECL(pfioc_iface);
2289
2290 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
2291 error = pfioctl_ioc_iface(cmd,
2292 PFIOCX_STRUCT_ADDR32(pfioc_iface),
2293 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
2294 PFIOCX_STRUCT_END(pfioc_iface, addr);
2295 break;
2296 }
2297
2298 default:
2299 error = ENODEV;
2300 break;
2301 }
2302
2303 lck_mtx_unlock(pf_lock);
2304 lck_rw_done(pf_perim_lock);
2305
2306 return (error);
2307 }
2308
/*
 * Handle the DIOCR* table ioctls (table and table-address manipulation).
 *
 * The userland pfioc_table structure has different layouts for 32-bit and
 * 64-bit callers, so the caller passes both views and we pick one based on
 * proc_is64bit(p).  The two switch bodies below are intentionally identical
 * except for which view (io64/io32) they dereference; keep them in lockstep
 * when modifying either side.
 *
 * Each case first validates pfrio_esize — the element size userland believes
 * it is exchanging — against the kernel's size for that command's element
 * type, returning ENODEV on mismatch (a userland/kernel ABI skew guard).
 * The actual work is delegated to the pfr_* table routines with
 * PFR_FLAG_USERIOCTL OR'd into the flags to mark the request as coming from
 * the ioctl path.
 *
 * Returns 0 on success or an errno from the validation / pfr_* backend.
 * An unexpected cmd is a programming error (VERIFY(0)) since the dispatcher
 * only routes DIOCR* commands here.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64)
		goto struct32;

	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		/* esize must be 0: no element array accompanies this cmd */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		/* esize must be 0: no element array accompanies this cmd */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* dispatcher only routes DIOCR* commands here */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;

struct32:
	/*
	 * 32-bit structure processing
	 * (mirror of the 64-bit switch above; keep both in sync)
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* dispatcher only routes DIOCR* commands here */
		VERIFY(0);
		/* NOTREACHED */
	}

done:
	return (error);
}
2672
/*
 * Handle DIOCGETSTARTERS: copy the list of PF "starter" tokens
 * (token_list_head) out to userland.
 *
 * tok32/tok64 are the 32-bit and 64-bit views of the user's pfioc_tokens;
 * the active view is selected by proc_is64bit(p).  Protocol:
 *   - If no tokens exist, return ENOENT.
 *   - If the caller passed size == 0, this is a size probe: write the total
 *     byte count required into size and return without copying.
 *   - Otherwise fill a temporary kernel buffer with as many pfioc_token
 *     records as fit in the caller's size, copy them out to pgt_buf, and
 *     write back the number of bytes actually produced.
 *
 * Returns 0 on success or an errno (ENOENT/ENOMEM/copyout error).
 *
 * NOTE(review): `size = sizeof (struct pfioc_token) * nr_tokens` is an int
 * multiplication with no overflow guard — presumably nr_tokens is small and
 * bounded in practice; confirm against where nr_tokens is incremented.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof (struct pfioc_token) * nr_tokens;
		/* ocnt remembers the caller's buffer size; cnt counts down */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe: report required bytes, copy nothing */
			if (p64)
				tok64->size = size;
			else
				tok32->size = size;
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* serialize each kernel token into the staging buffer */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof (*tokens))
				break;	/* no more buffer space left */

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			cnt -= sizeof (struct pfioc_token);
		}

		/* ocnt - cnt == bytes actually staged; copy out if any */
		if (cnt < ocnt)
			error = copyout(tokens, token_buf, ocnt - cnt);

		if (p64)
			tok64->size = ocnt - cnt;
		else
			tok32->size = ocnt - cnt;

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		/* dispatcher only routes DIOCGETSTARTERS here */
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
2746
2747 static void
2748 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2749 {
2750 struct pf_state *state;
2751 struct pf_src_node *sn;
2752 int killed = 0;
2753
2754 /* expire the states */
2755 state = TAILQ_FIRST(&state_list);
2756 while (state) {
2757 if (state->rule.ptr == rule)
2758 state->timeout = PFTM_PURGE;
2759 state = TAILQ_NEXT(state, entry_list);
2760 }
2761 pf_purge_expired_states(pf_status.states);
2762
2763 /* expire the src_nodes */
2764 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2765 if (sn->rule.ptr != rule)
2766 continue;
2767 if (sn->states != 0) {
2768 RB_FOREACH(state, pf_state_tree_id,
2769 &tree_id) {
2770 if (state->src_node == sn)
2771 state->src_node = NULL;
2772 if (state->nat_src_node == sn)
2773 state->nat_src_node = NULL;
2774 }
2775 sn->states = 0;
2776 }
2777 sn->expire = 1;
2778 killed++;
2779 }
2780 if (killed)
2781 pf_purge_expired_src_nodes();
2782 }
2783
2784 static void
2785 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2786 struct pf_rule *rule)
2787 {
2788 struct pf_rule *r;
2789 int nr = 0;
2790
2791 pf_expire_states_and_src_nodes(rule);
2792
2793 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2794 if (ruleset->rules[rs_num].active.rcount-- == 0)
2795 panic("%s: rcount value broken!", __func__);
2796 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2797
2798 while (r) {
2799 r->nr = nr++;
2800 r = TAILQ_NEXT(r, entries);
2801 }
2802 }
2803
2804
2805 static void
2806 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2807 {
2808 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2809 ruleset->rules[rs].active.ticket =
2810 ++ruleset->rules[rs].inactive.ticket;
2811 }
2812
/*
 * Delete the rule identified by pr->rule.ticket from the ruleset named by
 * pr->anchor, enforcing that the caller's owner string matches the rule's.
 *
 * If the deleted rule was the last one in an un-owned anchor ruleset, the
 * anchor itself becomes empty: the code walks up to the parent ruleset,
 * finds the rule that references the now-empty anchor, and deletes that
 * too, repeating (via `goto delete_rule`) until a non-collapsing level is
 * reached.
 *
 * Returns 0 on success; error from pf_find_ruleset_with_owner(), EINVAL for
 * a bad action, ENOENT if no rule carries the ticket, EACCES on owner
 * mismatch.  A ticket of 0 is a no-op that returns 0.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;
	int			 is_anchor;
	int			 error;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL)
		return (error);

	rs_num = pf_get_ruleset_number(pr->rule.action);
	if (rs_num >= PF_RULESET_MAX) {
		return (EINVAL);
	}

	if (pr->rule.ticket) {
		/* locate the rule carrying this ticket in the active list */
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL)
			return (ENOENT);

		/* only the owner of the rule may delete it */
		if (strcmp(rule->owner, pr->rule.owner))
			return (EACCES);

delete_rule:
		if (rule->anchor && (ruleset != &pf_main_ruleset) &&
		    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
		    ((ruleset->rules[rs_num].active.rcount - 1) == 0)) {
			/* set rule & ruleset to parent and repeat */
			struct pf_rule *delete_rule = rule;
			struct pf_ruleset *delete_ruleset = ruleset;

#define	parent_ruleset		ruleset->anchor->parent->ruleset
			if (ruleset->anchor->parent == NULL)
				ruleset = &pf_main_ruleset;
			else
				ruleset = &parent_ruleset;

			/* find the parent rule that points at this anchor */
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
			while (rule &&
			    (rule->anchor != delete_ruleset->anchor))
				rule = TAILQ_NEXT(rule, entries);
			if (rule == NULL)
				panic("%s: rule not found!", __func__);

			if (delete_rule->rule_flag & PFRULE_PFM)
				pffwrules--;

			pf_delete_rule_from_ruleset(delete_ruleset,
			    rs_num, delete_rule);
			delete_ruleset->rules[rs_num].active.ticket =
			    ++delete_ruleset->rules[rs_num].inactive.ticket;

			/* now delete the parent rule referencing the anchor */
			goto delete_rule;
		} else {
			if (rule->rule_flag & PFRULE_PFM)
				pffwrules--;
			pf_delete_rule_from_ruleset(ruleset, rs_num,
			    rule);
			pf_ruleset_cleanup(ruleset, rs_num);
		}
	}

	return (0);
}
2883
/*
 * Delete every rule whose owner string equals `owner`, across all ruleset
 * slots, descending into anchors as needed.
 *
 * The walk starts at the main ruleset of each slot.  Anchor rules owned by
 * `owner` (or un-owned, "") are handled by stepping INTO the anchor's
 * ruleset while it still has rules; an empty anchor rule is deleted in
 * place.  Non-anchor rules are deleted on an exact owner match.  When a
 * list is exhausted inside an anchor, pf_deleterule_anchor_step_out()
 * resumes the walk in the parent ruleset.  `deleted` batches the
 * pf_ruleset_cleanup() (skip-step/ticket refresh) so it runs once per
 * ruleset that actually changed, not once per rule.
 */
static void
pf_delete_rule_by_owner(char *owner)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule, *next;
	int			 deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			/* capture next before we possibly delete `rule` */
			next = TAILQ_NEXT(rule, entries);
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* flush pending cleanup before leaving this ruleset */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM)
							pffwrules--;
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else
					rule = next;
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM)
						pffwrules--;
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				/* list exhausted: clean up, then pop out of the anchor */
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset)
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
			}
		}
	}
}
2942
/*
 * Resume a rule walk in the parent after exhausting an anchor's ruleset.
 *
 * On entry *ruleset_ptr is the anchor ruleset we just finished and
 * *rule_ptr is NULL.  On return *ruleset_ptr is the parent ruleset (or the
 * main ruleset if the anchor has no parent) and *rule_ptr is the rule
 * FOLLOWING the parent rule that references the finished anchor — unless
 * that anchor still has active rules in slot `rs`, in which case *rule_ptr
 * is the referencing rule itself so the caller revisits it.  Panics if the
 * parent contains no rule referencing the anchor (broken linkage).
 */
static void
pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
    int rs, struct pf_rule **rule_ptr)
{
	struct pf_ruleset *ruleset = *ruleset_ptr;
	struct pf_rule *rule = *rule_ptr;

	/* step out of anchor */
	struct pf_ruleset *rs_copy = ruleset;
	ruleset = ruleset->anchor->parent?
	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;

	/* find the parent rule that references the anchor we came from */
	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
	while (rule && (rule->anchor != rs_copy->anchor))
		rule = TAILQ_NEXT(rule, entries);
	if (rule == NULL)
		panic("%s: parent rule of anchor not found!", __func__);
	/* skip past it if the anchor still has rules left to process */
	if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
		rule = TAILQ_NEXT(rule, entries);

	*ruleset_ptr = ruleset;
	*rule_ptr = rule;
}
2966
/*
 * Validate and finish initializing a freshly copied-in rule before it is
 * inserted into `ruleset`: resolve the bound interface, queue IDs, tags,
 * route labels, dynamic/table addresses, the anchor call, and the overload
 * table; then move the staged pool addresses (pf_pabuf) into the rule's
 * redirection pool and zero its counters.
 *
 * Error handling is accumulate-then-check: most validation failures just
 * set `error` and fall through so every setup step runs, then the rule is
 * torn down once at the end.  The one early return (unknown ifname) uses
 * pool_put() because at that point nothing else has been attached yet;
 * after that, cleanup must go through pf_rm_rule() to release everything.
 *
 * Returns 0 on success; EINVAL/EBUSY on failure (rule is freed either way
 * on failure — caller must not touch it afterwards).
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
	struct pf_pooladdr	*apa;
	int			 error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing attached yet: plain pool_put suffices */
			pool_put(&pf_rule_pl, rule);
			return (EINVAL);
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
#if PF_ALTQ
	/* set queue IDs */
	if (altq_allowed && rule->qname[0] != '\0') {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != '\0') {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			/* no priority queue named: reuse the main queue */
			rule->pqid = rule->qid;
	}
#endif /* PF_ALTQ */
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route-to options require an explicit direction */
	if (rule->rt && !rule->direction)
		error = EINVAL;
#if PFLOG
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
#endif /* PFLOG */
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr))
		error = EBUSY;
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
		error = EINVAL;
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
		error = EINVAL;
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
		error = EINVAL;
	/* resolve table references in the staged pool addresses too */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
		if (pf_tbladdr_setup(ruleset, &apa->addr))
			error = EINVAL;

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	pf_mv_pool(&pf_pabuf, &rule->rpool.list);
	/* translation rules (outside an anchor) and routing rules need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		/* full teardown: kif ref, tags, pool, tables may be attached */
		pf_rm_rule(NULL, rule);
		return (error);
	}
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return (0);
}
3052
3053 static int
3054 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
3055 {
3056 int error = 0;
3057
3058 switch (cmd) {
3059 case DIOCADDRULE: {
3060 struct pf_ruleset *ruleset;
3061 struct pf_rule *rule, *tail;
3062 int rs_num;
3063
3064 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3065 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3066 ruleset = pf_find_ruleset(pr->anchor);
3067 if (ruleset == NULL) {
3068 error = EINVAL;
3069 break;
3070 }
3071 rs_num = pf_get_ruleset_number(pr->rule.action);
3072 if (rs_num >= PF_RULESET_MAX) {
3073 error = EINVAL;
3074 break;
3075 }
3076 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3077 error = EINVAL;
3078 break;
3079 }
3080 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
3081 error = EBUSY;
3082 break;
3083 }
3084 if (pr->pool_ticket != ticket_pabuf) {
3085 error = EBUSY;
3086 break;
3087 }
3088 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3089 if (rule == NULL) {
3090 error = ENOMEM;
3091 break;
3092 }
3093 pf_rule_copyin(&pr->rule, rule, p, minordev);
3094 #if !INET
3095 if (rule->af == AF_INET) {
3096 pool_put(&pf_rule_pl, rule);
3097 error = EAFNOSUPPORT;
3098 break;
3099 }
3100 #endif /* INET */
3101 #if !INET6
3102 if (rule->af == AF_INET6) {
3103 pool_put(&pf_rule_pl, rule);
3104 error = EAFNOSUPPORT;
3105 break;
3106 }
3107 #endif /* INET6 */
3108 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
3109 pf_rulequeue);
3110 if (tail)
3111 rule->nr = tail->nr + 1;
3112 else
3113 rule->nr = 0;
3114
3115 if ((error = pf_rule_setup(pr, rule, ruleset)))
3116 break;
3117
3118 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
3119 rule, entries);
3120 ruleset->rules[rs_num].inactive.rcount++;
3121 if (rule->rule_flag & PFRULE_PFM)
3122 pffwrules++;
3123 break;
3124 }
3125
3126 case DIOCGETRULES: {
3127 struct pf_ruleset *ruleset;
3128 struct pf_rule *tail;
3129 int rs_num;
3130
3131 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3132 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3133 ruleset = pf_find_ruleset(pr->anchor);
3134 if (ruleset == NULL) {
3135 error = EINVAL;
3136 break;
3137 }
3138 rs_num = pf_get_ruleset_number(pr->rule.action);
3139 if (rs_num >= PF_RULESET_MAX) {
3140 error = EINVAL;
3141 break;
3142 }
3143 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3144 pf_rulequeue);
3145 if (tail)
3146 pr->nr = tail->nr + 1;
3147 else
3148 pr->nr = 0;
3149 pr->ticket = ruleset->rules[rs_num].active.ticket;
3150 break;
3151 }
3152
3153 case DIOCGETRULE: {
3154 struct pf_ruleset *ruleset;
3155 struct pf_rule *rule;
3156 int rs_num, i;
3157
3158 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3159 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3160 ruleset = pf_find_ruleset(pr->anchor);
3161 if (ruleset == NULL) {
3162 error = EINVAL;
3163 break;
3164 }
3165 rs_num = pf_get_ruleset_number(pr->rule.action);
3166 if (rs_num >= PF_RULESET_MAX) {
3167 error = EINVAL;
3168 break;
3169 }
3170 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3171 error = EBUSY;
3172 break;
3173 }
3174 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3175 while ((rule != NULL) && (rule->nr != pr->nr))
3176 rule = TAILQ_NEXT(rule, entries);
3177 if (rule == NULL) {
3178 error = EBUSY;
3179 break;
3180 }
3181 pf_rule_copyout(rule, &pr->rule);
3182 if (pf_anchor_copyout(ruleset, rule, pr)) {
3183 error = EBUSY;
3184 break;
3185 }
3186 pfi_dynaddr_copyout(&pr->rule.src.addr);
3187 pfi_dynaddr_copyout(&pr->rule.dst.addr);
3188 pf_tbladdr_copyout(&pr->rule.src.addr);
3189 pf_tbladdr_copyout(&pr->rule.dst.addr);
3190 pf_rtlabel_copyout(&pr->rule.src.addr);
3191 pf_rtlabel_copyout(&pr->rule.dst.addr);
3192 for (i = 0; i < PF_SKIP_COUNT; ++i)
3193 if (rule->skip[i].ptr == NULL)
3194 pr->rule.skip[i].nr = -1;
3195 else
3196 pr->rule.skip[i].nr =
3197 rule->skip[i].ptr->nr;
3198
3199 if (pr->action == PF_GET_CLR_CNTR) {
3200 rule->evaluations = 0;
3201 rule->packets[0] = rule->packets[1] = 0;
3202 rule->bytes[0] = rule->bytes[1] = 0;
3203 }
3204 break;
3205 }
3206
3207 case DIOCCHANGERULE: {
3208 struct pfioc_rule *pcr = pr;
3209 struct pf_ruleset *ruleset;
3210 struct pf_rule *oldrule = NULL, *newrule = NULL;
3211 struct pf_pooladdr *pa;
3212 u_int32_t nr = 0;
3213 int rs_num;
3214
3215 if (!(pcr->action == PF_CHANGE_REMOVE ||
3216 pcr->action == PF_CHANGE_GET_TICKET) &&
3217 pcr->pool_ticket != ticket_pabuf) {
3218 error = EBUSY;
3219 break;
3220 }
3221
3222 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3223 pcr->action > PF_CHANGE_GET_TICKET) {
3224 error = EINVAL;
3225 break;
3226 }
3227 pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
3228 pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
3229 ruleset = pf_find_ruleset(pcr->anchor);
3230 if (ruleset == NULL) {
3231 error = EINVAL;
3232 break;
3233 }
3234 rs_num = pf_get_ruleset_number(pcr->rule.action);
3235 if (rs_num >= PF_RULESET_MAX) {
3236 error = EINVAL;
3237 break;
3238 }
3239
3240 if (pcr->action == PF_CHANGE_GET_TICKET) {
3241 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3242 break;
3243 } else {
3244 if (pcr->ticket !=
3245 ruleset->rules[rs_num].active.ticket) {
3246 error = EINVAL;
3247 break;
3248 }
3249 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3250 error = EINVAL;
3251 break;
3252 }
3253 }
3254
3255 if (pcr->action != PF_CHANGE_REMOVE) {
3256 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3257 if (newrule == NULL) {
3258 error = ENOMEM;
3259 break;
3260 }
3261 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3262 #if !INET
3263 if (newrule->af == AF_INET) {
3264 pool_put(&pf_rule_pl, newrule);
3265 error = EAFNOSUPPORT;
3266 break;
3267 }
3268 #endif /* INET */
3269 #if !INET6
3270 if (newrule->af == AF_INET6) {
3271 pool_put(&pf_rule_pl, newrule);
3272 error = EAFNOSUPPORT;
3273 break;
3274 }
3275 #endif /* INET6 */
3276 if (newrule->ifname[0]) {
3277 newrule->kif = pfi_kif_get(newrule->ifname);
3278 if (newrule->kif == NULL) {
3279 pool_put(&pf_rule_pl, newrule);
3280 error = EINVAL;
3281 break;
3282 }
3283 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3284 } else
3285 newrule->kif = NULL;
3286
3287 #if PF_ALTQ
3288 /* set queue IDs */
3289 if (altq_allowed && newrule->qname[0] != '\0') {
3290 if ((newrule->qid =
3291 pf_qname2qid(newrule->qname)) == 0)
3292 error = EBUSY;
3293 else if (newrule->pqname[0] != '\0') {
3294 if ((newrule->pqid =
3295 pf_qname2qid(newrule->pqname)) == 0)
3296 error = EBUSY;
3297 } else
3298 newrule->pqid = newrule->qid;
3299 }
3300 #endif /* PF_ALTQ */
3301 if (newrule->tagname[0])
3302 if ((newrule->tag =
3303 pf_tagname2tag(newrule->tagname)) == 0)
3304 error = EBUSY;
3305 if (newrule->match_tagname[0])
3306 if ((newrule->match_tag = pf_tagname2tag(
3307 newrule->match_tagname)) == 0)
3308 error = EBUSY;
3309 if (newrule->rt && !newrule->direction)
3310 error = EINVAL;
3311 #if PFLOG
3312 if (!newrule->log)
3313 newrule->logif = 0;
3314 if (newrule->logif >= PFLOGIFS_MAX)
3315 error = EINVAL;
3316 #endif /* PFLOG */
3317 if (pf_rtlabel_add(&newrule->src.addr) ||
3318 pf_rtlabel_add(&newrule->dst.addr))
3319 error = EBUSY;
3320 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
3321 error = EINVAL;
3322 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
3323 error = EINVAL;
3324 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
3325 error = EINVAL;
3326 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
3327 error = EINVAL;
3328 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
3329 error = EINVAL;
3330 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3331 if (pf_tbladdr_setup(ruleset, &pa->addr))
3332 error = EINVAL;
3333
3334 if (newrule->overload_tblname[0]) {
3335 if ((newrule->overload_tbl = pfr_attach_table(
3336 ruleset, newrule->overload_tblname)) ==
3337 NULL)
3338 error = EINVAL;
3339 else
3340 newrule->overload_tbl->pfrkt_flags |=
3341 PFR_TFLAG_ACTIVE;
3342 }
3343
3344 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3345 if (((((newrule->action == PF_NAT) ||
3346 (newrule->action == PF_RDR) ||
3347 (newrule->action == PF_BINAT) ||
3348 (newrule->rt > PF_FASTROUTE)) &&
3349 !newrule->anchor)) &&
3350 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3351 error = EINVAL;
3352
3353 if (error) {
3354 pf_rm_rule(NULL, newrule);
3355 break;
3356 }
3357 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3358 newrule->evaluations = 0;
3359 newrule->packets[0] = newrule->packets[1] = 0;
3360 newrule->bytes[0] = newrule->bytes[1] = 0;
3361 }
3362 pf_empty_pool(&pf_pabuf);
3363
3364 if (pcr->action == PF_CHANGE_ADD_HEAD)
3365 oldrule = TAILQ_FIRST(
3366 ruleset->rules[rs_num].active.ptr);
3367 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3368 oldrule = TAILQ_LAST(
3369 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3370 else {
3371 oldrule = TAILQ_FIRST(
3372 ruleset->rules[rs_num].active.ptr);
3373 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3374 oldrule = TAILQ_NEXT(oldrule, entries);
3375 if (oldrule == NULL) {
3376 if (newrule != NULL)
3377 pf_rm_rule(NULL, newrule);
3378 error = EINVAL;
3379 break;
3380 }
3381 }
3382
3383 if (pcr->action == PF_CHANGE_REMOVE) {
3384 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3385 ruleset->rules[rs_num].active.rcount--;
3386 } else {
3387 if (oldrule == NULL)
3388 TAILQ_INSERT_TAIL(
3389 ruleset->rules[rs_num].active.ptr,
3390 newrule, entries);
3391 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3392 pcr->action == PF_CHANGE_ADD_BEFORE)
3393 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3394 else
3395 TAILQ_INSERT_AFTER(
3396 ruleset->rules[rs_num].active.ptr,
3397 oldrule, newrule, entries);
3398 ruleset->rules[rs_num].active.rcount++;
3399 }
3400
3401 nr = 0;
3402 TAILQ_FOREACH(oldrule,
3403 ruleset->rules[rs_num].active.ptr, entries)
3404 oldrule->nr = nr++;
3405
3406 ruleset->rules[rs_num].active.ticket++;
3407
3408 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3409 pf_remove_if_empty_ruleset(ruleset);
3410
3411 break;
3412 }
3413
3414 case DIOCINSERTRULE: {
3415 struct pf_ruleset *ruleset;
3416 struct pf_rule *rule, *tail, *r;
3417 int rs_num;
3418 int is_anchor;
3419
3420 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3421 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3422 is_anchor = (pr->anchor_call[0] != '\0');
3423
3424 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3425 pr->rule.owner, is_anchor, &error)) == NULL)
3426 break;
3427
3428 rs_num = pf_get_ruleset_number(pr->rule.action);
3429 if (rs_num >= PF_RULESET_MAX) {
3430 error = EINVAL;
3431 break;
3432 }
3433 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3434 error = EINVAL;
3435 break;
3436 }
3437
3438 /* make sure this anchor rule doesn't exist already */
3439 if (is_anchor) {
3440 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3441 while (r) {
3442 if (r->anchor &&
3443 ((strcmp(r->anchor->name,
3444 pr->anchor_call)) == 0)) {
3445 if (((strcmp(pr->rule.owner,
3446 r->owner)) == 0) ||
3447 ((strcmp(r->owner, "")) == 0))
3448 error = EEXIST;
3449 else
3450 error = EPERM;
3451 break;
3452 }
3453 r = TAILQ_NEXT(r, entries);
3454 }
3455 }
3456
3457 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3458 if (rule == NULL) {
3459 error = ENOMEM;
3460 break;
3461 }
3462 pf_rule_copyin(&pr->rule, rule, p, minordev);
3463 #if !INET
3464 if (rule->af == AF_INET) {
3465 pool_put(&pf_rule_pl, rule);
3466 error = EAFNOSUPPORT;
3467 break;
3468 }
3469 #endif /* INET */
3470 #if !INET6
3471 if (rule->af == AF_INET6) {
3472 pool_put(&pf_rule_pl, rule);
3473 error = EAFNOSUPPORT;
3474 break;
3475 }
3476
3477 #endif /* INET6 */
3478 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3479 while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
3480 r = TAILQ_NEXT(r, entries);
3481 if (r == NULL) {
3482 if ((tail =
3483 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3484 pf_rulequeue)) != NULL)
3485 rule->nr = tail->nr + 1;
3486 else
3487 rule->nr = 0;
3488 } else {
3489 rule->nr = r->nr;
3490 }
3491
3492 if ((error = pf_rule_setup(pr, rule, ruleset)))
3493 break;
3494
3495 if (rule->anchor != NULL)
3496 strncpy(rule->anchor->owner, rule->owner,
3497 PF_OWNER_NAME_SIZE);
3498
3499 if (r) {
3500 TAILQ_INSERT_BEFORE(r, rule, entries);
3501 while (r && ++r->nr)
3502 r = TAILQ_NEXT(r, entries);
3503 } else
3504 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3505 rule, entries);
3506 ruleset->rules[rs_num].active.rcount++;
3507
3508 /* Calculate checksum for the main ruleset */
3509 if (ruleset == &pf_main_ruleset)
3510 error = pf_setup_pfsync_matching(ruleset);
3511
3512 pf_ruleset_cleanup(ruleset, rs_num);
3513 rule->ticket = ruleset->rules[rs_num].active.ticket;
3514
3515 pr->rule.ticket = rule->ticket;
3516 pf_rule_copyout(rule, &pr->rule);
3517 if (rule->rule_flag & PFRULE_PFM)
3518 pffwrules++;
3519 break;
3520 }
3521
3522 case DIOCDELETERULE: {
3523 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3524 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3525
3526 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3527 error = EINVAL;
3528 break;
3529 }
3530
3531 if (pr->rule.ticket) {
3532 if ((error = pf_delete_rule_by_ticket(pr)))
3533 break;
3534 } else
3535 pf_delete_rule_by_owner(pr->rule.owner);
3536 pr->nr = pffwrules;
3537 break;
3538 }
3539
3540 default:
3541 VERIFY(0);
3542 /* NOTREACHED */
3543 }
3544
3545 return (error);
3546 }
3547
3548 static int
3549 pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
3550 {
3551 #pragma unused(p)
3552 int error = 0;
3553
3554 switch (cmd) {
3555 case DIOCCLRSTATES: {
3556 struct pf_state *s, *nexts;
3557 int killed = 0;
3558
3559 psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
3560 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
3561 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
3562
3563 if (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
3564 s->kif->pfik_name) == 0) {
3565 #if NPFSYNC
3566 /* don't send out individual delete messages */
3567 s->sync_flags = PFSTATE_NOSYNC;
3568 #endif
3569 pf_unlink_state(s);
3570 killed++;
3571 }
3572 }
3573 psk->psk_af = killed;
3574 #if NPFSYNC
3575 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3576 #endif
3577 break;
3578 }
3579
3580 case DIOCKILLSTATES: {
3581 struct pf_state *s, *nexts;
3582 struct pf_state_key *sk;
3583 struct pf_state_host *src, *dst;
3584 int killed = 0;
3585
3586 for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
3587 s = nexts) {
3588 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
3589 sk = s->state_key;
3590
3591 if (sk->direction == PF_OUT) {
3592 src = &sk->lan;
3593 dst = &sk->ext;
3594 } else {
3595 src = &sk->ext;
3596 dst = &sk->lan;
3597 }
3598 if ((!psk->psk_af || sk->af == psk->psk_af) &&
3599 (!psk->psk_proto || psk->psk_proto == sk->proto) &&
3600 PF_MATCHA(psk->psk_src.neg,
3601 &psk->psk_src.addr.v.a.addr,
3602 &psk->psk_src.addr.v.a.mask,
3603 &src->addr, sk->af) &&
3604 PF_MATCHA(psk->psk_dst.neg,
3605 &psk->psk_dst.addr.v.a.addr,
3606 &psk->psk_dst.addr.v.a.mask,
3607 &dst->addr, sk->af) &&
3608 (pf_match_xport(psk->psk_proto,
3609 psk->psk_proto_variant, &psk->psk_src.xport,
3610 &src->xport)) &&
3611 (pf_match_xport(psk->psk_proto,
3612 psk->psk_proto_variant, &psk->psk_dst.xport,
3613 &dst->xport)) &&
3614 (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
3615 s->kif->pfik_name) == 0)) {
3616 #if NPFSYNC
3617 /* send immediate delete of state */
3618 pfsync_delete_state(s);
3619 s->sync_flags |= PFSTATE_NOSYNC;
3620 #endif
3621 pf_unlink_state(s);
3622 killed++;
3623 }
3624 }
3625 psk->psk_af = killed;
3626 break;
3627 }
3628
3629 default:
3630 VERIFY(0);
3631 /* NOTREACHED */
3632 }
3633
3634 return (error);
3635 }
3636
/*
 * pfioctl_ioc_state: import (DIOCADDSTATE) or export (DIOCGETSTATE) a
 * single state entry, using the wire-format struct pfsync_state
 * embedded in the ioctl argument.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* only known timeout classes may be imported */
		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		/* populate s and sk from the user-supplied record */
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			/*
			 * NOTE(review): sk is not released here;
			 * presumably pf_insert_state() disposes of the
			 * state key on failure -- confirm.
			 */
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		/* counter must not wrap back to zero */
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* look the state up by its 64-bit id + creator id */
		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3711
/*
 * pfioctl_ioc_states (DIOCGETSTATES): copy the entire state table out
 * to user space.  A zero ps_len is a size probe: only the required
 * buffer length is returned.  32-bit and 64-bit user structures are
 * both supported, selected by the calling process's ABI.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {	/* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size probe: report the space needed and return */
			size = sizeof (struct pfsync_state) * pf_status.states;
			if (p64)
				ps64->ps_len = size;
			else
				ps32->ps_len = size;
			break;
		}

		/* single bounce buffer, reused for each exported state */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already torn down */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop once the user buffer is full */
				if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof (*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof (*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report how many bytes were actually copied out */
		size = sizeof (struct pfsync_state) * nr;
		if (p64)
			ps64->ps_len = size;
		else
			ps32->ps_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
3780
/*
 * pfioctl_ioc_natlook (DIOCNATLOOK): given the endpoints of a
 * connection as user space sees them, find the matching state and
 * return the translated (NATed) addresses and ports in the
 * pnl->rs*/pnl->rd* fields.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		/*
		 * NOTE(review): key is only partially initialized; the
		 * lookup presumably compares just the fields filled in
		 * below -- confirm against pf_find_state_all().
		 */
		key.af = pnl->af;
		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/* TCP/UDP lookups additionally require both ports */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->dxport,
				    sizeof (key.ext.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof (key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof (key.lan.xport));
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->sxport,
				    sizeof (key.ext.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				/* report the translated side of the state */
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof (pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof (pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof (pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof (pnl->rsxport));
				}
			} else
				error = ENOENT;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3865
3866 static int
3867 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3868 {
3869 #pragma unused(p)
3870 int error = 0;
3871
3872 switch (cmd) {
3873 case DIOCSETTIMEOUT: {
3874 int old;
3875
3876 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3877 pt->seconds < 0) {
3878 error = EINVAL;
3879 goto fail;
3880 }
3881 old = pf_default_rule.timeout[pt->timeout];
3882 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3883 pt->seconds = 1;
3884 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3885 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3886 wakeup(pf_purge_thread_fn);
3887 pt->seconds = old;
3888 break;
3889 }
3890
3891 case DIOCGETTIMEOUT: {
3892 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3893 error = EINVAL;
3894 goto fail;
3895 }
3896 pt->seconds = pf_default_rule.timeout[pt->timeout];
3897 break;
3898 }
3899
3900 default:
3901 VERIFY(0);
3902 /* NOTREACHED */
3903 }
3904 fail:
3905 return (error);
3906 }
3907
3908 static int
3909 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3910 {
3911 #pragma unused(p)
3912 int error = 0;
3913
3914 switch (cmd) {
3915 case DIOCGETLIMIT: {
3916
3917 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3918 error = EINVAL;
3919 goto fail;
3920 }
3921 pl->limit = pf_pool_limits[pl->index].limit;
3922 break;
3923 }
3924
3925 case DIOCSETLIMIT: {
3926 int old_limit;
3927
3928 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3929 pf_pool_limits[pl->index].pp == NULL) {
3930 error = EINVAL;
3931 goto fail;
3932 }
3933 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3934 pl->limit, NULL, 0);
3935 old_limit = pf_pool_limits[pl->index].limit;
3936 pf_pool_limits[pl->index].limit = pl->limit;
3937 pl->limit = old_limit;
3938 break;
3939 }
3940
3941 default:
3942 VERIFY(0);
3943 /* NOTREACHED */
3944 }
3945 fail:
3946 return (error);
3947 }
3948
3949 static int
3950 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3951 {
3952 #pragma unused(p)
3953 struct pf_pooladdr *pa = NULL;
3954 struct pf_pool *pool = NULL;
3955 int error = 0;
3956
3957 switch (cmd) {
3958 case DIOCBEGINADDRS: {
3959 pf_empty_pool(&pf_pabuf);
3960 pp->ticket = ++ticket_pabuf;
3961 break;
3962 }
3963
3964 case DIOCADDADDR: {
3965 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
3966 if (pp->ticket != ticket_pabuf) {
3967 error = EBUSY;
3968 break;
3969 }
3970 #if !INET
3971 if (pp->af == AF_INET) {
3972 error = EAFNOSUPPORT;
3973 break;
3974 }
3975 #endif /* INET */
3976 #if !INET6
3977 if (pp->af == AF_INET6) {
3978 error = EAFNOSUPPORT;
3979 break;
3980 }
3981 #endif /* INET6 */
3982 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3983 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3984 pp->addr.addr.type != PF_ADDR_TABLE) {
3985 error = EINVAL;
3986 break;
3987 }
3988 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3989 if (pa == NULL) {
3990 error = ENOMEM;
3991 break;
3992 }
3993 pf_pooladdr_copyin(&pp->addr, pa);
3994 if (pa->ifname[0]) {
3995 pa->kif = pfi_kif_get(pa->ifname);
3996 if (pa->kif == NULL) {
3997 pool_put(&pf_pooladdr_pl, pa);
3998 error = EINVAL;
3999 break;
4000 }
4001 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
4002 }
4003 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
4004 pfi_dynaddr_remove(&pa->addr);
4005 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
4006 pool_put(&pf_pooladdr_pl, pa);
4007 error = EINVAL;
4008 break;
4009 }
4010 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
4011 break;
4012 }
4013
4014 case DIOCGETADDRS: {
4015 pp->nr = 0;
4016 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
4017 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
4018 pp->r_num, 0, 1, 0);
4019 if (pool == NULL) {
4020 error = EBUSY;
4021 break;
4022 }
4023 TAILQ_FOREACH(pa, &pool->list, entries)
4024 pp->nr++;
4025 break;
4026 }
4027
4028 case DIOCGETADDR: {
4029 u_int32_t nr = 0;
4030
4031 pp->anchor[sizeof (pp->anchor) - 1] = '\0';
4032 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
4033 pp->r_num, 0, 1, 1);
4034 if (pool == NULL) {
4035 error = EBUSY;
4036 break;
4037 }
4038 pa = TAILQ_FIRST(&pool->list);
4039 while ((pa != NULL) && (nr < pp->nr)) {
4040 pa = TAILQ_NEXT(pa, entries);
4041 nr++;
4042 }
4043 if (pa == NULL) {
4044 error = EBUSY;
4045 break;
4046 }
4047 pf_pooladdr_copyout(pa, &pp->addr);
4048 pfi_dynaddr_copyout(&pp->addr.addr);
4049 pf_tbladdr_copyout(&pp->addr.addr);
4050 pf_rtlabel_copyout(&pp->addr.addr);
4051 break;
4052 }
4053
4054 case DIOCCHANGEADDR: {
4055 struct pfioc_pooladdr *pca = pp;
4056 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
4057 struct pf_ruleset *ruleset;
4058
4059 if (pca->action < PF_CHANGE_ADD_HEAD ||
4060 pca->action > PF_CHANGE_REMOVE) {
4061 error = EINVAL;
4062 break;
4063 }
4064 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4065 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4066 pca->addr.addr.type != PF_ADDR_TABLE) {
4067 error = EINVAL;
4068 break;
4069 }
4070
4071 pca->anchor[sizeof (pca->anchor) - 1] = '\0';
4072 ruleset = pf_find_ruleset(pca->anchor);
4073 if (ruleset == NULL) {
4074 error = EBUSY;
4075 break;
4076 }
4077 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
4078 pca->r_num, pca->r_last, 1, 1);
4079 if (pool == NULL) {
4080 error = EBUSY;
4081 break;
4082 }
4083 if (pca->action != PF_CHANGE_REMOVE) {
4084 newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
4085 if (newpa == NULL) {
4086 error = ENOMEM;
4087 break;
4088 }
4089 pf_pooladdr_copyin(&pca->addr, newpa);
4090 #if !INET
4091 if (pca->af == AF_INET) {
4092 pool_put(&pf_pooladdr_pl, newpa);
4093 error = EAFNOSUPPORT;
4094 break;
4095 }
4096 #endif /* INET */
4097 #if !INET6
4098 if (pca->af == AF_INET6) {
4099 pool_put(&pf_pooladdr_pl, newpa);
4100 error = EAFNOSUPPORT;
4101 break;
4102 }
4103 #endif /* INET6 */
4104 if (newpa->ifname[0]) {
4105 newpa->kif = pfi_kif_get(newpa->ifname);
4106 if (newpa->kif == NULL) {
4107 pool_put(&pf_pooladdr_pl, newpa);
4108 error = EINVAL;
4109 break;
4110 }
4111 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
4112 } else
4113 newpa->kif = NULL;
4114 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
4115 pf_tbladdr_setup(ruleset, &newpa->addr)) {
4116 pfi_dynaddr_remove(&newpa->addr);
4117 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
4118 pool_put(&pf_pooladdr_pl, newpa);
4119 error = EINVAL;
4120 break;
4121 }
4122 }
4123
4124 if (pca->action == PF_CHANGE_ADD_HEAD)
4125 oldpa = TAILQ_FIRST(&pool->list);
4126 else if (pca->action == PF_CHANGE_ADD_TAIL)
4127 oldpa = TAILQ_LAST(&pool->list, pf_palist);
4128 else {
4129 int i = 0;
4130
4131 oldpa = TAILQ_FIRST(&pool->list);
4132 while ((oldpa != NULL) && (i < (int)pca->nr)) {
4133 oldpa = TAILQ_NEXT(oldpa, entries);
4134 i++;
4135 }
4136 if (oldpa == NULL) {
4137 error = EINVAL;
4138 break;
4139 }
4140 }
4141
4142 if (pca->action == PF_CHANGE_REMOVE) {
4143 TAILQ_REMOVE(&pool->list, oldpa, entries);
4144 pfi_dynaddr_remove(&oldpa->addr);
4145 pf_tbladdr_remove(&oldpa->addr);
4146 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
4147 pool_put(&pf_pooladdr_pl, oldpa);
4148 } else {
4149 if (oldpa == NULL)
4150 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4151 else if (pca->action == PF_CHANGE_ADD_HEAD ||
4152 pca->action == PF_CHANGE_ADD_BEFORE)
4153 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4154 else
4155 TAILQ_INSERT_AFTER(&pool->list, oldpa,
4156 newpa, entries);
4157 }
4158
4159 pool->cur = TAILQ_FIRST(&pool->list);
4160 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
4161 pca->af);
4162 break;
4163 }
4164
4165 default:
4166 VERIFY(0);
4167 /* NOTREACHED */
4168 }
4169
4170 return (error);
4171 }
4172
4173 static int
4174 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4175 {
4176 #pragma unused(p)
4177 int error = 0;
4178
4179 switch (cmd) {
4180 case DIOCGETRULESETS: {
4181 struct pf_ruleset *ruleset;
4182 struct pf_anchor *anchor;
4183
4184 pr->path[sizeof (pr->path) - 1] = '\0';
4185 pr->name[sizeof (pr->name) - 1] = '\0';
4186 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4187 error = EINVAL;
4188 break;
4189 }
4190 pr->nr = 0;
4191 if (ruleset->anchor == NULL) {
4192 /* XXX kludge for pf_main_ruleset */
4193 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4194 if (anchor->parent == NULL)
4195 pr->nr++;
4196 } else {
4197 RB_FOREACH(anchor, pf_anchor_node,
4198 &ruleset->anchor->children)
4199 pr->nr++;
4200 }
4201 break;
4202 }
4203
4204 case DIOCGETRULESET: {
4205 struct pf_ruleset *ruleset;
4206 struct pf_anchor *anchor;
4207 u_int32_t nr = 0;
4208
4209 pr->path[sizeof (pr->path) - 1] = '\0';
4210 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4211 error = EINVAL;
4212 break;
4213 }
4214 pr->name[0] = 0;
4215 if (ruleset->anchor == NULL) {
4216 /* XXX kludge for pf_main_ruleset */
4217 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4218 if (anchor->parent == NULL && nr++ == pr->nr) {
4219 strlcpy(pr->name, anchor->name,
4220 sizeof (pr->name));
4221 break;
4222 }
4223 } else {
4224 RB_FOREACH(anchor, pf_anchor_node,
4225 &ruleset->anchor->children)
4226 if (nr++ == pr->nr) {
4227 strlcpy(pr->name, anchor->name,
4228 sizeof (pr->name));
4229 break;
4230 }
4231 }
4232 if (!pr->name[0])
4233 error = EBUSY;
4234 break;
4235 }
4236
4237 default:
4238 VERIFY(0);
4239 /* NOTREACHED */
4240 }
4241
4242 return (error);
4243 }
4244
/*
 * pfioctl_ioc_trans: transactional update of rulesets, tables and
 * (when enabled) altq (DIOCXBEGIN / DIOCXROLLBACK / DIOCXCOMMIT).
 * The caller passes an array of struct pfioc_trans_e elements; 32-bit
 * and 64-bit user structures are both supported, selected by the
 * calling process's ABI.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;

	/* pick the element size, count and user buffer for this ABI */
	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* reject callers built against a different pfioc_trans_e */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* open an inactive copy for each element of the array */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor name */
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq transactions are global only */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_begin_altq(&ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail;
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* hand the new ticket back to user space */
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* throw away the inactive copy of every element */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_rollback_altq(ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail; /* really bad */
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;	/* saved so we can walk twice */
		int i;

		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					/* ticket must match the open altq tx */
					if (!altqs_inactive_open ||
					    ioe->ticket !=
					    ticket_altqs_inactive) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EBUSY;
						goto fail;
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed &&
				    (error = pf_commit_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
4520
/*
 * pfioctl_ioc_src_nodes (DIOCGETSRCNODES): copy the source-tracking
 * table out to user space.  A zero psn_len is a size probe: only the
 * required buffer length is returned.  32-bit and 64-bit user
 * structures are both supported.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* size probe: count nodes and report bytes needed */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;

			size = sizeof (struct pf_src_node) * nr;
			if (p64)
				psn64->psn_len = size;
			else
				psn32->psn_len = size;
			break;
		}

		/* single bounce buffer, reused for each exported node */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the user buffer is full */
			if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			/* convert absolute timestamps to relative ages */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			/* strip kernel pointers before exporting */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}

		/* report how many bytes were actually copied out */
		size = sizeof (struct pf_src_node) * nr;
		if (p64)
			psn64->psn_len = size;
		else
			psn32->psn_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);

}
4610
/*
 * pfioctl_ioc_src_node_kill (DIOCKILLSRCNODES): expire all source
 * nodes whose source/destination addresses match the caller-supplied
 * negatable address masks, first detaching any states that still
 * reference them.  The number of nodes killed is returned through
 * psnk->psnk_af.
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/* drop every back-reference to sn */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				/* expire of 1 lets the purge pass reap it */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		/* psnk_af doubles as the "killed count" out-parameter */
		psnk->psnk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
4663
4664 static int
4665 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4666 struct pfioc_iface_64 *io64, struct proc *p)
4667 {
4668 int p64 = proc_is64bit(p);
4669 int error = 0;
4670
4671 switch (cmd) {
4672 case DIOCIGETIFACES: {
4673 user_addr_t buf;
4674 int esize;
4675
4676 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4677 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4678
4679 /* esize must be that of the user space version of pfi_kif */
4680 if (esize != sizeof (struct pfi_uif)) {
4681 error = ENODEV;
4682 break;
4683 }
4684 if (p64)
4685 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4686 else
4687 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4688 error = pfi_get_ifaces(
4689 p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4690 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4691 break;
4692 }
4693
4694 case DIOCSETIFFLAG: {
4695 if (p64)
4696 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4697 else
4698 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4699
4700 error = pfi_set_flags(
4701 p64 ? io64->pfiio_name : io32->pfiio_name,
4702 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4703 break;
4704 }
4705
4706 case DIOCCLRIFFLAG: {
4707 if (p64)
4708 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4709 else
4710 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4711
4712 error = pfi_clear_flags(
4713 p64 ? io64->pfiio_name : io32->pfiio_name,
4714 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4715 break;
4716 }
4717
4718 default:
4719 VERIFY(0);
4720 /* NOTREACHED */
4721 }
4722
4723 return (error);
4724 }
4725
/*
 * PF entry point invoked from the IPv4/IPv6 input and output paths.
 * Runs pf_test()/pf_test6() on the packet at *mp under the PF locks,
 * detaching it from any m_nextpkt chain for the duration and repairing
 * the chain linkage (and *mppn, the previous packet's m_nextpkt slot)
 * afterwards.  Returns 0 if the packet passed, or an errno if it was
 * dropped/consumed; *mp is set to NULL when the packet is gone.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0, reentry;
	struct mbuf *nextpkt;

	/*
	 * If this thread is already marked as holding the PF locks
	 * (re-entry from within PF processing), don't take them again.
	 */
	reentry = net_thread_check_lock(NET_THREAD_HELD_PF);
	if (!reentry) {
		lck_rw_lock_shared(pf_perim_lock);
		/* PF disabled: skip straight to the perim-lock release */
		if (!pf_is_enabled)
			goto done;

		lck_mtx_lock(pf_lock);
		net_thread_set_lock(NET_THREAD_HELD_PF);
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	/* detach this packet from the chain while PF examines it */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL)
			m = m->m_nextpkt;
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL)
			*mppn = *mp;
		else
			*mppn = nextpkt;
	}
	if (!reentry) {
		net_thread_unset_lock(NET_THREAD_HELD_PF);
		lck_mtx_unlock(pf_lock);
	}
done:
	if (!reentry)
		lck_rw_done(pf_perim_lock);

	return (error);
}
4788
4789
#if INET
/*
 * IPv4 leg of pf_af_hook(): finalize any delayed transport checksum,
 * present the IP header to pf_test() in network byte order, and map
 * the PF verdict to an errno (0 on pass).  *mp is freed and NULLed
 * when PF drops the packet.
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* ip_len/ip_off go to pf_test in network byte order; swap on LE */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* dropped by PF: free and report unreachable */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* mbuf already gone — presumably consumed by PF */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/*
		 * pf_test may have substituted the mbuf; re-fetch the
		 * header pointer before restoring host byte order.
		 */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
#endif /* INET */
4844
#if INET6
/*
 * IPv6 leg of pf_af_hook(): finalize any delayed transport checksum,
 * run pf_test6(), and map the verdict to an errno (0 on pass).
 */
int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in6_delayed_cksum(m, sizeof(struct ip6_hdr));
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) == PF_PASS)
		return (error);

	if (*mp == NULL) {
		/* mbuf already gone — presumably consumed by PF */
		error = ENOBUFS;
	} else {
		/* dropped by PF: free and report unreachable */
		m_freem(*mp);
		*mp = NULL;
		error = EHOSTUNREACH;
	}
	return (error);
}
#endif /* INET6 */
4882
4883 int
4884 pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd)
4885 {
4886 lck_rw_lock_shared(pf_perim_lock);
4887 lck_mtx_lock(pf_lock);
4888
4889 switch (cmd) {
4890 case SIOCSIFADDR:
4891 case SIOCAIFADDR:
4892 case SIOCDIFADDR:
4893 #if INET6
4894 case SIOCAIFADDR_IN6_32:
4895 case SIOCAIFADDR_IN6_64:
4896 case SIOCDIFADDR_IN6:
4897 #endif /* INET6 */
4898 if (ifp->if_pf_kif != NULL)
4899 pfi_kifaddr_update(ifp->if_pf_kif);
4900 break;
4901 default:
4902 panic("%s: unexpected ioctl %lu", __func__, cmd);
4903 /* NOTREACHED */
4904 }
4905
4906 lck_mtx_unlock(pf_lock);
4907 lck_rw_done(pf_perim_lock);
4908 return (0);
4909 }
4910
4911 /*
4912 * Caller acquires dlil lock as writer (exclusive)
4913 */
4914 void
4915 pf_ifnet_hook(struct ifnet *ifp, int attach)
4916 {
4917 lck_rw_lock_shared(pf_perim_lock);
4918 lck_mtx_lock(pf_lock);
4919 if (attach)
4920 pfi_attach_ifnet(ifp);
4921 else
4922 pfi_detach_ifnet(ifp);
4923 lck_mtx_unlock(pf_lock);
4924 lck_rw_done(pf_perim_lock);
4925 }
4926
4927 static void
4928 pf_attach_hooks(void)
4929 {
4930 ifnet_head_lock_shared();
4931 /*
4932 * Check against ifnet_addrs[] before proceeding, in case this
4933 * is called very early on, e.g. during dlil_init() before any
4934 * network interface is attached.
4935 */
4936 if (ifnet_addrs != NULL) {
4937 int i;
4938
4939 for (i = 0; i <= if_index; i++) {
4940 struct ifnet *ifp = ifindex2ifnet[i];
4941 if (ifp != NULL) {
4942 pfi_attach_ifnet(ifp);
4943 }
4944 }
4945 }
4946 ifnet_head_done();
4947 }
4948
#if 0
/*
 * currently unused along with pfdetach()
 *
 * Mirror of pf_attach_hooks(): deregister every live ifnet from PF.
 * Bug fix: the loop index was declared inside the loop body while
 * being used in the for-header, so this would not compile if the
 * block were ever re-enabled; the declaration now lives at function
 * scope, matching pf_attach_hooks().
 */
static void
pf_detach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	/* guard against being called before any interface is attached */
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			/* only detach interfaces PF actually knows about */
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif