]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pf_ioctl.c
xnu-2782.30.5.tar.gz
[apple/xnu.git] / bsd / net / pf_ioctl.c
1 /*
2 * Copyright (c) 2007-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83
84 #include <mach/vm_param.h>
85
86 #include <net/dlil.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/route.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_var.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/ip_icmp.h>
97 #include <netinet/if_ether.h>
98
99 #if DUMMYNET
100 #include <netinet/ip_dummynet.h>
101 #else
102 struct ip_fw_args;
103 #endif /* DUMMYNET */
104
105 #include <libkern/crypto/md5.h>
106
107 #include <machine/machine_routines.h>
108
109 #include <miscfs/devfs/devfs.h>
110
111 #include <net/pfvar.h>
112
113 #if NPFSYNC
114 #include <net/if_pfsync.h>
115 #endif /* NPFSYNC */
116
117 #if PFLOG
118 #include <net/if_pflog.h>
119 #endif /* PFLOG */
120
121 #if INET6
122 #include <netinet/ip6.h>
123 #include <netinet/in_pcb.h>
124 #endif /* INET6 */
125
126 #if PF_ALTQ
127 #include <net/altq/altq.h>
128 #include <net/altq/altq_cbq.h>
129 #include <net/classq/classq_red.h>
130 #include <net/classq/classq_rio.h>
131 #include <net/classq/classq_blue.h>
132 #include <net/classq/classq_sfb.h>
133 #endif /* PF_ALTQ */
134
135 #include <dev/random/randomdev.h>
136
137 #if 0
138 static void pfdetach(void);
139 #endif
140 static int pfopen(dev_t, int, int, struct proc *);
141 static int pfclose(dev_t, int, int, struct proc *);
142 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
143 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
144 struct pfioc_table_64 *, struct proc *);
145 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
146 struct pfioc_tokens_64 *, struct proc *);
147 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
148 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
149 struct proc *);
150 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
151 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
152 struct pfioc_states_64 *, struct proc *);
153 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
154 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
155 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
156 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
157 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
158 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
159 struct pfioc_trans_64 *, struct proc *);
160 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
161 struct pfioc_src_nodes_64 *, struct proc *);
162 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
163 struct proc *);
164 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
165 struct pfioc_iface_64 *, struct proc *);
166 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
167 u_int8_t, u_int8_t, u_int8_t);
168 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
169 static void pf_empty_pool(struct pf_palist *);
170 #if PF_ALTQ
171 static int pf_begin_altq(u_int32_t *);
172 static int pf_rollback_altq(u_int32_t);
173 static int pf_commit_altq(u_int32_t);
174 static int pf_enable_altq(struct pf_altq *);
175 static int pf_disable_altq(struct pf_altq *);
176 static void pf_altq_copyin(struct pf_altq *, struct pf_altq *);
177 static void pf_altq_copyout(struct pf_altq *, struct pf_altq *);
178 #endif /* PF_ALTQ */
179 static int pf_begin_rules(u_int32_t *, int, const char *);
180 static int pf_rollback_rules(u_int32_t, int, char *);
181 static int pf_setup_pfsync_matching(struct pf_ruleset *);
182 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
183 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
184 static int pf_commit_rules(u_int32_t, int, char *);
185 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
186 int);
187 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
188 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
189 struct pf_state *);
190 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
191 struct pf_state *);
192 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
193 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
194 static void pf_expire_states_and_src_nodes(struct pf_rule *);
195 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
196 int, struct pf_rule *);
197 static void pf_addrwrap_setup(struct pf_addr_wrap *);
198 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
199 struct pf_ruleset *);
200 static void pf_delete_rule_by_owner(char *, u_int32_t);
201 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
202 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
203 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
204 int, struct pf_rule **);
205
206 #define PF_CDEV_MAJOR (-1)
207
208 static struct cdevsw pf_cdevsw = {
209 /* open */ pfopen,
210 /* close */ pfclose,
211 /* read */ eno_rdwrt,
212 /* write */ eno_rdwrt,
213 /* ioctl */ pfioctl,
214 /* stop */ eno_stop,
215 /* reset */ eno_reset,
216 /* tty */ NULL,
217 /* select */ eno_select,
218 /* mmap */ eno_mmap,
219 /* strategy */ eno_strat,
220 /* getc */ eno_getc,
221 /* putc */ eno_putc,
222 /* type */ 0
223 };
224
225 static void pf_attach_hooks(void);
226 #if 0
227 /* currently unused along with pfdetach() */
228 static void pf_detach_hooks(void);
229 #endif
230
231 /*
232 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
233 * and used in pf_af_hook() for performance optimization, such that packets
234 * will enter pf_test() or pf_test6() only when PF is running.
235 */
236 int pf_is_enabled = 0;
237
238 #if PF_ALTQ
239 u_int32_t altq_allowed = 0;
240 #endif /* PF_ALTQ */
241
242 u_int32_t pf_hash_seed;
243
244 /*
245 * These are the pf enabled reference counting variables
246 */
247 static u_int64_t pf_enabled_ref_count;
248 static u_int32_t nr_tokens = 0;
249 static u_int64_t pffwrules;
250 static u_int32_t pfdevcnt;
251
252 SLIST_HEAD(list_head, pfioc_kernel_token);
253 static struct list_head token_list_head;
254
255 struct pf_rule pf_default_rule;
256 #if PF_ALTQ
257 static int pf_altq_running;
258 #endif /* PF_ALTQ */
259
260 #define TAGID_MAX 50000
261 #if !PF_ALTQ
262 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
263 TAILQ_HEAD_INITIALIZER(pf_tags);
264 #else /* PF_ALTQ */
265 static TAILQ_HEAD(pf_tags, pf_tagname)
266 pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
267 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
268 #endif /* PF_ALTQ */
269
270 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
271 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
272 #endif
273 static u_int16_t tagname2tag(struct pf_tags *, char *);
274 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
275 static void tag_unref(struct pf_tags *, u_int16_t);
276 static int pf_rtlabel_add(struct pf_addr_wrap *);
277 static void pf_rtlabel_remove(struct pf_addr_wrap *);
278 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
279
280 #if INET
281 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
282 struct ip_fw_args *);
283 #endif /* INET */
284 #if INET6
285 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
286 struct ip_fw_args *);
287 #endif /* INET6 */
288
289 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
290
291 /*
292 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
293 */
294 #define PFIOCX_STRUCT_DECL(s) \
295 struct { \
296 union { \
297 struct s##_32 _s##_32; \
298 struct s##_64 _s##_64; \
299 } _u; \
300 } *s##_un = NULL \
301
302 #define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
303 VERIFY(s##_un == NULL); \
304 s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
305 if (s##_un == NULL) { \
306 _action \
307 } else { \
308 if (p64) \
309 bcopy(a, &s##_un->_u._s##_64, \
310 sizeof (struct s##_64)); \
311 else \
312 bcopy(a, &s##_un->_u._s##_32, \
313 sizeof (struct s##_32)); \
314 } \
315 }
316
317 #define PFIOCX_STRUCT_END(s, a) { \
318 VERIFY(s##_un != NULL); \
319 if (p64) \
320 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
321 else \
322 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
323 _FREE(s##_un, M_TEMP); \
324 s##_un = NULL; \
325 }
326
327 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
328 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
329
330 /*
331 * Helper macros for regular ioctl structures.
332 */
333 #define PFIOC_STRUCT_BEGIN(a, v, _action) { \
334 VERIFY((v) == NULL); \
335 (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
336 if ((v) == NULL) { \
337 _action \
338 } else { \
339 bcopy(a, v, sizeof (*(v))); \
340 } \
341 }
342
343 #define PFIOC_STRUCT_END(v, a) { \
344 VERIFY((v) != NULL); \
345 bcopy(v, a, sizeof (*(v))); \
346 _FREE(v, M_TEMP); \
347 (v) = NULL; \
348 }
349
350 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
351 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
352
353 static lck_attr_t *pf_perim_lock_attr;
354 static lck_grp_t *pf_perim_lock_grp;
355 static lck_grp_attr_t *pf_perim_lock_grp_attr;
356
357 static lck_attr_t *pf_lock_attr;
358 static lck_grp_t *pf_lock_grp;
359 static lck_grp_attr_t *pf_lock_grp_attr;
360
361 struct thread *pf_purge_thread;
362
363 extern void pfi_kifaddr_update(void *);
364
365 /* pf enable ref-counting helper functions */
366 static u_int64_t generate_token(struct proc *);
367 static int remove_token(struct pfioc_remove_token *);
368 static void invalidate_all_tokens(void);
369
370 static u_int64_t
371 generate_token(struct proc *p)
372 {
373 u_int64_t token_value;
374 struct pfioc_kernel_token *new_token;
375
376 new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
377 M_WAITOK|M_ZERO);
378
379 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
380
381 if (new_token == NULL) {
382 /* malloc failed! bail! */
383 printf("%s: unable to allocate pf token structure!", __func__);
384 return (0);
385 }
386
387 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
388
389 new_token->token.token_value = token_value;
390 new_token->token.pid = proc_pid(p);
391 proc_name(new_token->token.pid, new_token->token.proc_name,
392 sizeof (new_token->token.proc_name));
393 new_token->token.timestamp = pf_calendar_time_second();
394
395 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
396 nr_tokens++;
397
398 return (token_value);
399 }
400
401 static int
402 remove_token(struct pfioc_remove_token *tok)
403 {
404 struct pfioc_kernel_token *entry, *tmp;
405
406 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
407
408 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
409 if (tok->token_value == entry->token.token_value) {
410 SLIST_REMOVE(&token_list_head, entry,
411 pfioc_kernel_token, next);
412 _FREE(entry, M_TEMP);
413 nr_tokens--;
414 return (0); /* success */
415 }
416 }
417
418 printf("pf : remove failure\n");
419 return (ESRCH); /* failure */
420 }
421
422 static void
423 invalidate_all_tokens(void)
424 {
425 struct pfioc_kernel_token *entry, *tmp;
426
427 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
428
429 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
430 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
431 _FREE(entry, M_TEMP);
432 }
433
434 nr_tokens = 0;
435 }
436
/*
 * One-time initialization of the pf subsystem at boot: locks, memory
 * pools, rulesets, the default rule and its timeouts, the purge
 * thread, the /dev/pf and /dev/pfm device nodes, and the inet/inet6
 * packet-filter hooks.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* perimeter rwlock: serializes configuration changes */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	/* main mutex: protects pf data structures */
	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* fixed-size allocation pools for the core pf objects */
	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#if PF_ALTQ
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
#endif /* PF_ALTQ */
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on small-memory (<=256MB) systems */
	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);
#if PF_ALTQ
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	/* ALTQ support must be enabled via the "altq" boot-arg */
	PE_parse_boot_argn("altq", &altq_allowed, sizeof (altq_allowed));

	/* ALTQ request codes must mirror the classq ones 1:1 */
	_CASSERT(ALTRQ_PURGE == CLASSQRQ_PURGE);
	_CASSERT(ALTRQ_PURGE_SC == CLASSQRQ_PURGE_SC);
	_CASSERT(ALTRQ_EVENT == CLASSQRQ_EVENT);

	_CASSERT(ALTDQ_REMOVE == CLASSQDQ_REMOVE);
	_CASSERT(ALTDQ_POLL == CLASSQDQ_POLL);
#endif /* PF_ALTQ */

	/* service-class values must encode their own index bits */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	/* kick off the thread that expires states/fragments/src nodes */
	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: the regular control device */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	/* /dev/pfm: exclusive-open management device (see pfopen()) */
	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
}
574
#if 0
/*
 * Full teardown of the pf subsystem (currently compiled out): detach
 * the packet hooks, flush rulesets/states/source nodes/tables, destroy
 * anchors and the main ruleset, then release the pools and subsystems.
 * The order mirrors the reverse of pfinit().
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);
#if PF_ALTQ
	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);
#endif /* PF_ALTQ */

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		/* expire=1, states=0 makes the node eligible for purge */
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
#if PF_ALTQ
	pool_destroy(&pf_altq_pl);
#endif /* PF_ALTQ */
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
655
656 static int
657 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
658 {
659 #pragma unused(flags, fmt, p)
660 if (minor(dev) >= PFDEV_MAX)
661 return (ENXIO);
662
663 if (minor(dev) == PFDEV_PFM) {
664 lck_mtx_lock(pf_lock);
665 if (pfdevcnt != 0) {
666 lck_mtx_unlock(pf_lock);
667 return (EBUSY);
668 }
669 pfdevcnt++;
670 lck_mtx_unlock(pf_lock);
671 }
672 return (0);
673 }
674
675 static int
676 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
677 {
678 #pragma unused(flags, fmt, p)
679 if (minor(dev) >= PFDEV_MAX)
680 return (ENXIO);
681
682 if (minor(dev) == PFDEV_PFM) {
683 lck_mtx_lock(pf_lock);
684 VERIFY(pfdevcnt > 0);
685 pfdevcnt--;
686 lck_mtx_unlock(pf_lock);
687 }
688 return (0);
689 }
690
691 static struct pf_pool *
692 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
693 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
694 u_int8_t check_ticket)
695 {
696 struct pf_ruleset *ruleset;
697 struct pf_rule *rule;
698 int rs_num;
699
700 ruleset = pf_find_ruleset(anchor);
701 if (ruleset == NULL)
702 return (NULL);
703 rs_num = pf_get_ruleset_number(rule_action);
704 if (rs_num >= PF_RULESET_MAX)
705 return (NULL);
706 if (active) {
707 if (check_ticket && ticket !=
708 ruleset->rules[rs_num].active.ticket)
709 return (NULL);
710 if (r_last)
711 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
712 pf_rulequeue);
713 else
714 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
715 } else {
716 if (check_ticket && ticket !=
717 ruleset->rules[rs_num].inactive.ticket)
718 return (NULL);
719 if (r_last)
720 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
721 pf_rulequeue);
722 else
723 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
724 }
725 if (!r_last) {
726 while ((rule != NULL) && (rule->nr != rule_number))
727 rule = TAILQ_NEXT(rule, entries);
728 }
729 if (rule == NULL)
730 return (NULL);
731
732 return (&rule->rpool);
733 }
734
735 static void
736 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
737 {
738 struct pf_pooladdr *mv_pool_pa;
739
740 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
741 TAILQ_REMOVE(poola, mv_pool_pa, entries);
742 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
743 }
744 }
745
746 static void
747 pf_empty_pool(struct pf_palist *poola)
748 {
749 struct pf_pooladdr *empty_pool_pa;
750
751 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
752 pfi_dynaddr_remove(&empty_pool_pa->addr);
753 pf_tbladdr_remove(&empty_pool_pa->addr);
754 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
755 TAILQ_REMOVE(poola, empty_pool_pa, entries);
756 pool_put(&pf_pooladdr_pl, empty_pool_pa);
757 }
758 }
759
/*
 * Unlink 'rule' from 'rulequeue' (when non-NULL) and destroy it once
 * nothing references it.  Destruction is deferred while states or
 * source nodes still point at the rule; such a rule is freed later by
 * another pf_rm_rule(NULL, rule) call when the last reference drops.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as already unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer final teardown while states/src nodes still reference us */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#if PF_ALTQ
	if (altq_allowed) {
		if (rule->pqid != rule->qid)
			pf_qid_unref(rule->pqid);
		pf_qid_unref(rule->qid);
	}
#endif /* PF_ALTQ */
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* table detach did not happen above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
807
/*
 * Return the numeric id for 'tagname' in the tag list 'head', taking a
 * reference.  An existing entry just gets its refcount bumped; a new
 * name is assigned the lowest unused id (the list is kept sorted by
 * id, so the scan reuses the first gap).  Returns 0 when the id space
 * (TAGID_MAX) is exhausted or allocation fails.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	/* existing entry: just take another reference */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* loop exits with p at the first gap, or NULL at list end */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;	/* zeroed by M_ZERO, so this yields ref == 1 */

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
850
851 static void
852 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
853 {
854 struct pf_tagname *tag;
855
856 TAILQ_FOREACH(tag, head, entries)
857 if (tag->tag == tagid) {
858 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
859 return;
860 }
861 }
862
863 static void
864 tag_unref(struct pf_tags *head, u_int16_t tag)
865 {
866 struct pf_tagname *p, *next;
867
868 if (tag == 0)
869 return;
870
871 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
872 next = TAILQ_NEXT(p, entries);
873 if (tag == p->tag) {
874 if (--p->ref == 0) {
875 TAILQ_REMOVE(head, p, entries);
876 _FREE(p, M_TEMP);
877 }
878 break;
879 }
880 }
881 }
882
/*
 * Map a tag name to its id in the global pf tag table, taking a
 * reference (allocates a new id for an unknown name).  0 on failure.
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
888
/*
 * Copy the name of global pf tag 'tagid' into p (>= PF_TAG_NAME_SIZE
 * bytes); p is unchanged if the id is unknown.
 */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
894
895 void
896 pf_tag_ref(u_int16_t tag)
897 {
898 struct pf_tagname *t;
899
900 TAILQ_FOREACH(t, &pf_tags, entries)
901 if (t->tag == tag)
902 break;
903 if (t != NULL)
904 t->ref++;
905 }
906
/*
 * Release one reference on global pf tag 'tag' (frees the entry at
 * refcount zero; tag 0 is a no-op).
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
912
/*
 * Route labels are not supported on this platform; stub that always
 * reports success.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}
919
/* Route-label removal stub: no-op on this platform. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
925
/* Route-label copyout stub: no-op on this platform. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
931
932 #if PF_ALTQ
/*
 * Map an ALTQ queue name to its numeric qid (queue ids share the tag
 * allocator, using the pf_qids list).  Takes a reference; returns 0 on
 * failure.  Caller must hold pf_lock.
 */
u_int32_t
pf_qname2qid(char *qname)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}
940
/*
 * Copy the name of ALTQ queue id 'qid' into p; p is unchanged for an
 * unknown id.  Caller must hold pf_lock.
 */
void
pf_qid2qname(u_int32_t qid, char *p)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}
948
/*
 * Drop one reference on ALTQ queue id 'qid' (freed at refcount zero).
 * Caller must hold pf_lock.
 */
void
pf_qid_unref(u_int32_t qid)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	tag_unref(&pf_qids, (u_int16_t)qid);
}
956
957 static int
958 pf_begin_altq(u_int32_t *ticket)
959 {
960 struct pf_altq *altq;
961 int error = 0;
962
963 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
964
965 /* Purge the old altq list */
966 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
967 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
968 if (altq->qname[0] == '\0') {
969 /* detach and destroy the discipline */
970 error = altq_remove(altq);
971 } else
972 pf_qid_unref(altq->qid);
973 pool_put(&pf_altq_pl, altq);
974 }
975 if (error)
976 return (error);
977 *ticket = ++ticket_altqs_inactive;
978 altqs_inactive_open = 1;
979 return (0);
980 }
981
982 static int
983 pf_rollback_altq(u_int32_t ticket)
984 {
985 struct pf_altq *altq;
986 int error = 0;
987
988 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
989
990 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
991 return (0);
992 /* Purge the old altq list */
993 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
994 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
995 if (altq->qname[0] == '\0') {
996 /* detach and destroy the discipline */
997 error = altq_remove(altq);
998 } else
999 pf_qid_unref(altq->qid);
1000 pool_put(&pf_altq_pl, altq);
1001 }
1002 altqs_inactive_open = 0;
1003 return (error);
1004 }
1005
/*
 * Commit the ALTQ transaction identified by 'ticket': swap the active
 * and inactive queue lists, attach (and if pf_altq_running, enable)
 * the new disciplines, then purge the now-inactive old list.  Returns
 * 0 on success, EBUSY for a stale ticket, or the first/last error
 * encountered.  Caller must hold pf_lock.
 */
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs;
	struct pf_altq *altq;
	int err, error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == '\0') {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0) {
				/*
				 * NOTE(review): this early return leaves the
				 * lists already swapped, the old list not yet
				 * purged and altqs_inactive_open still set —
				 * presumably relying on a later begin/rollback
				 * to clean up; confirm against callers.
				 */
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == '\0') {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}

	altqs_inactive_open = 0;
	return (error);
}
1058
1059 static int
1060 pf_enable_altq(struct pf_altq *altq)
1061 {
1062 struct ifnet *ifp;
1063 struct ifclassq *ifq;
1064 int error = 0;
1065
1066 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1067
1068 if ((ifp = ifunit(altq->ifname)) == NULL)
1069 return (EINVAL);
1070
1071 ifq = &ifp->if_snd;
1072 IFCQ_LOCK(ifq);
1073 if (IFCQ_ALTQ(ifq)->altq_type != ALTQT_NONE)
1074 error = altq_enable(IFCQ_ALTQ(ifq));
1075
1076 /* set or clear tokenbucket regulator */
1077 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
1078 struct tb_profile tb = { 0, 0, 0 };
1079
1080 if (altq->aflags & PF_ALTQF_TBR) {
1081 if (altq->bwtype != PF_ALTQ_BW_ABSOLUTE &&
1082 altq->bwtype != PF_ALTQ_BW_PERCENT) {
1083 error = EINVAL;
1084 } else {
1085 if (altq->bwtype == PF_ALTQ_BW_ABSOLUTE)
1086 tb.rate = altq->ifbandwidth;
1087 else
1088 tb.percent = altq->ifbandwidth;
1089 tb.depth = altq->tbrsize;
1090 error = ifclassq_tbr_set(ifq, &tb, TRUE);
1091 }
1092 } else if (IFCQ_TBR_IS_ENABLED(ifq)) {
1093 error = ifclassq_tbr_set(ifq, &tb, TRUE);
1094 }
1095 }
1096 IFCQ_UNLOCK(ifq);
1097
1098 return (error);
1099 }
1100
1101 static int
1102 pf_disable_altq(struct pf_altq *altq)
1103 {
1104 struct ifnet *ifp;
1105 struct ifclassq *ifq;
1106 int error;
1107
1108 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1109
1110 if ((ifp = ifunit(altq->ifname)) == NULL)
1111 return (EINVAL);
1112
1113 /*
1114 * when the discipline is no longer referenced, it was overridden
1115 * by a new one. if so, just return.
1116 */
1117 ifq = &ifp->if_snd;
1118 IFCQ_LOCK(ifq);
1119 if (altq->altq_disc != IFCQ_ALTQ(ifq)->altq_disc) {
1120 IFCQ_UNLOCK(ifq);
1121 return (0);
1122 }
1123
1124 error = altq_disable(IFCQ_ALTQ(ifq));
1125
1126 if (error == 0 && IFCQ_TBR_IS_ENABLED(ifq)) {
1127 /* clear tokenbucket regulator */
1128 struct tb_profile tb = { 0, 0, 0 };
1129 error = ifclassq_tbr_set(ifq, &tb, TRUE);
1130 }
1131 IFCQ_UNLOCK(ifq);
1132
1133 return (error);
1134 }
1135
1136 static void
1137 pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
1138 {
1139 bcopy(src, dst, sizeof (struct pf_altq));
1140
1141 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1142 dst->qname[sizeof (dst->qname) - 1] = '\0';
1143 dst->parent[sizeof (dst->parent) - 1] = '\0';
1144 dst->altq_disc = NULL;
1145 dst->entries.tqe_next = NULL;
1146 dst->entries.tqe_prev = NULL;
1147 }
1148
1149 static void
1150 pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
1151 {
1152 struct pf_altq pa;
1153
1154 bcopy(src, &pa, sizeof (struct pf_altq));
1155 pa.altq_disc = NULL;
1156 pa.entries.tqe_next = NULL;
1157 pa.entries.tqe_prev = NULL;
1158 bcopy(&pa, dst, sizeof (struct pf_altq));
1159 }
1160 #endif /* PF_ALTQ */
1161
1162 static int
1163 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1164 {
1165 struct pf_ruleset *rs;
1166 struct pf_rule *rule;
1167
1168 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1169 return (EINVAL);
1170 rs = pf_find_or_create_ruleset(anchor);
1171 if (rs == NULL)
1172 return (EINVAL);
1173 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1174 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1175 rs->rules[rs_num].inactive.rcount--;
1176 }
1177 *ticket = ++rs->rules[rs_num].inactive.ticket;
1178 rs->rules[rs_num].inactive.open = 1;
1179 return (0);
1180 }
1181
1182 static int
1183 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1184 {
1185 struct pf_ruleset *rs;
1186 struct pf_rule *rule;
1187
1188 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1189 return (EINVAL);
1190 rs = pf_find_ruleset(anchor);
1191 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1192 rs->rules[rs_num].inactive.ticket != ticket)
1193 return (0);
1194 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1195 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1196 rs->rules[rs_num].inactive.rcount--;
1197 }
1198 rs->rules[rs_num].inactive.open = 0;
1199 return (0);
1200 }
1201
/*
 * Helpers used by pf_hash_rule*() to fold rule fields into the running
 * MD5 context; every expansion site must have `ctx` (MD5_CTX *) in
 * scope.  The _HTONL/_HTONS variants hash the value in network byte
 * order through the caller-supplied scratch variable `stor`, so the
 * resulting checksum is independent of host endianness.
 */
#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
1217
/*
 * Fold one rule address (and, for TCP/UDP, its port range) into the MD5
 * context.  Only the fields relevant to the address type are hashed.
 * The update order is part of the checksum format consumed via
 * pf_setup_pfsync_matching() -- do not reorder.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only exist for the port-carrying protocols */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1254
/*
 * Fold all checksum-relevant fields of a rule into the MD5 context.
 * `x`/`y` are scratch storage for the byte-swapping macros.  As with
 * pf_hash_rule_addr(), the field order defines the checksum format --
 * do not reorder the updates.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t	x;
	u_int32_t	y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1293
/*
 * Promote the inactive ruleset built under `ticket` to active: refresh
 * the pfsync checksum for the main ruleset, swap the active/inactive
 * lists (including their rule-number index arrays and counts), refresh
 * the skip steps and free the superseded rules.  Called with pf_lock
 * held.  Returns EINVAL on a bad ruleset number, EBUSY when the
 * transaction is not open or the ticket is stale.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array, *r;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/*
	 * The outgoing rules leave the global count of rules installed
	 * via the pfm device (PFRULE_PFM is set by pf_rule_copyin()).
	 */
	if(old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
1360
1361 static void
1362 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1363 int minordev)
1364 {
1365 bcopy(src, dst, sizeof (struct pf_rule));
1366
1367 dst->label[sizeof (dst->label) - 1] = '\0';
1368 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1369 dst->qname[sizeof (dst->qname) - 1] = '\0';
1370 dst->pqname[sizeof (dst->pqname) - 1] = '\0';
1371 dst->tagname[sizeof (dst->tagname) - 1] = '\0';
1372 dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
1373 dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';
1374
1375 dst->cuid = kauth_cred_getuid(p->p_ucred);
1376 dst->cpid = p->p_pid;
1377
1378 dst->anchor = NULL;
1379 dst->kif = NULL;
1380 dst->overload_tbl = NULL;
1381
1382 TAILQ_INIT(&dst->rpool.list);
1383 dst->rpool.cur = NULL;
1384
1385 /* initialize refcounting */
1386 dst->states = 0;
1387 dst->src_nodes = 0;
1388
1389 dst->entries.tqe_prev = NULL;
1390 dst->entries.tqe_next = NULL;
1391 if ((uint8_t)minordev == PFDEV_PFM)
1392 dst->rule_flag |= PFRULE_PFM;
1393 }
1394
1395 static void
1396 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1397 {
1398 bcopy(src, dst, sizeof (struct pf_rule));
1399
1400 dst->anchor = NULL;
1401 dst->kif = NULL;
1402 dst->overload_tbl = NULL;
1403
1404 TAILQ_INIT(&dst->rpool.list);
1405 dst->rpool.cur = NULL;
1406
1407 dst->entries.tqe_prev = NULL;
1408 dst->entries.tqe_next = NULL;
1409 }
1410
/*
 * Flatten a kernel state (key `sk` plus state `s`) into the external
 * pfsync_state representation `sp`.  The destination is zeroed first so
 * no kernel memory can leak through padding.  Time fields are converted
 * from absolute kernel time to relative seconds (age / time-to-expiry).
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext.addr = sk->ext.addr;
	sp->ext.xport = sk->ext.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* (unsigned)-1 marks the absent optional nat/anchor rules */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;	/* age in seconds */
	sp->expire = pf_state_expires(s);	/* absolute; adjusted below */
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	/* convert expiry to seconds remaining (0 when already expired) */
	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;

}
1466
/*
 * Populate a kernel state key/state pair from the external pfsync_state
 * representation `sp`.  Counters are zeroed and the rule pointers are
 * bound to the default rule, since the originating rules cannot be
 * resolved here.  The flowhash is recomputed from the imported key.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext.addr = sp->ext.addr;
	sk->ext.xport = sp->ext.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/*
	 * Back-date the expiry so (timeout - sp->expire) seconds count
	 * as already elapsed.
	 * NOTE(review): sp->timeout originates outside the kernel and
	 * indexes pf_default_rule.timeout[] -- confirm the caller
	 * validates it against the table bounds before reaching here.
	 */
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1503
1504 static void
1505 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1506 {
1507 bcopy(src, dst, sizeof (struct pf_pooladdr));
1508
1509 dst->entries.tqe_prev = NULL;
1510 dst->entries.tqe_next = NULL;
1511 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1512 dst->kif = NULL;
1513 }
1514
1515 static void
1516 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1517 {
1518 bcopy(src, dst, sizeof (struct pf_pooladdr));
1519
1520 dst->entries.tqe_prev = NULL;
1521 dst->entries.tqe_next = NULL;
1522 dst->kif = NULL;
1523 }
1524
/*
 * Recompute the ruleset checksum used for pfsync matching: hash every
 * rule on the inactive lists (scrub excluded) into one MD5 digest and
 * store it in pf_status.pf_chksum.  As a side effect, (re)builds each
 * inactive list's ptr_array index of rules keyed by rule number.
 * Returns ENOMEM if an index array cannot be allocated.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		/* discard any stale index before rebuilding */
		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			/*
			 * NOTE(review): assumes every rule->nr on the list
			 * is < rcount; an out-of-range nr would write past
			 * ptr_array.  Confirm nr is assigned densely by
			 * the rule-insertion path.
			 */
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
1564
1565 static void
1566 pf_start(void)
1567 {
1568 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1569
1570 VERIFY(pf_is_enabled == 0);
1571
1572 pf_is_enabled = 1;
1573 pf_status.running = 1;
1574 pf_status.since = pf_calendar_time_second();
1575 if (pf_status.stateid == 0) {
1576 pf_status.stateid = pf_time_second();
1577 pf_status.stateid = pf_status.stateid << 32;
1578 }
1579 wakeup(pf_purge_thread_fn);
1580 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1581 }
1582
/*
 * Mark pf disabled, record the stop time and wake the purge thread so
 * it notices the state change.  Called with pf_lock held; pf must
 * currently be enabled.  The store order mirrors pf_start() and is
 * preserved in case these flags are observed without pf_lock --
 * NOTE(review): confirm whether any fast-path reader is lock-free.
 */
static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1596
1597 static int
1598 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1599 {
1600 #pragma unused(dev)
1601 int p64 = proc_is64bit(p);
1602 int error = 0;
1603 int minordev = minor(dev);
1604
1605 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1606 return (EPERM);
1607
1608 /* XXX keep in sync with switch() below */
1609 if (securelevel > 1)
1610 switch (cmd) {
1611 case DIOCGETRULES:
1612 case DIOCGETRULE:
1613 case DIOCGETADDRS:
1614 case DIOCGETADDR:
1615 case DIOCGETSTATE:
1616 case DIOCSETSTATUSIF:
1617 case DIOCGETSTATUS:
1618 case DIOCCLRSTATUS:
1619 case DIOCNATLOOK:
1620 case DIOCSETDEBUG:
1621 case DIOCGETSTATES:
1622 case DIOCINSERTRULE:
1623 case DIOCDELETERULE:
1624 case DIOCGETTIMEOUT:
1625 case DIOCCLRRULECTRS:
1626 case DIOCGETLIMIT:
1627 case DIOCGETALTQS:
1628 case DIOCGETALTQ:
1629 case DIOCGETQSTATS:
1630 case DIOCGETRULESETS:
1631 case DIOCGETRULESET:
1632 case DIOCRGETTABLES:
1633 case DIOCRGETTSTATS:
1634 case DIOCRCLRTSTATS:
1635 case DIOCRCLRADDRS:
1636 case DIOCRADDADDRS:
1637 case DIOCRDELADDRS:
1638 case DIOCRSETADDRS:
1639 case DIOCRGETADDRS:
1640 case DIOCRGETASTATS:
1641 case DIOCRCLRASTATS:
1642 case DIOCRTSTADDRS:
1643 case DIOCOSFPGET:
1644 case DIOCGETSRCNODES:
1645 case DIOCCLRSRCNODES:
1646 case DIOCIGETIFACES:
1647 case DIOCGIFSPEED:
1648 case DIOCSETIFFLAG:
1649 case DIOCCLRIFFLAG:
1650 break;
1651 case DIOCRCLRTABLES:
1652 case DIOCRADDTABLES:
1653 case DIOCRDELTABLES:
1654 case DIOCRSETTFLAGS: {
1655 int pfrio_flags;
1656
1657 bcopy(&((struct pfioc_table *)(void *)addr)->
1658 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1659
1660 if (pfrio_flags & PFR_FLAG_DUMMY)
1661 break; /* dummy operation ok */
1662 return (EPERM);
1663 }
1664 default:
1665 return (EPERM);
1666 }
1667
1668 if (!(flags & FWRITE))
1669 switch (cmd) {
1670 case DIOCSTART:
1671 case DIOCSTARTREF:
1672 case DIOCSTOP:
1673 case DIOCSTOPREF:
1674 case DIOCGETSTARTERS:
1675 case DIOCGETRULES:
1676 case DIOCGETADDRS:
1677 case DIOCGETADDR:
1678 case DIOCGETSTATE:
1679 case DIOCGETSTATUS:
1680 case DIOCGETSTATES:
1681 case DIOCINSERTRULE:
1682 case DIOCDELETERULE:
1683 case DIOCGETTIMEOUT:
1684 case DIOCGETLIMIT:
1685 case DIOCGETALTQS:
1686 case DIOCGETALTQ:
1687 case DIOCGETQSTATS:
1688 case DIOCGETRULESETS:
1689 case DIOCGETRULESET:
1690 case DIOCNATLOOK:
1691 case DIOCRGETTABLES:
1692 case DIOCRGETTSTATS:
1693 case DIOCRGETADDRS:
1694 case DIOCRGETASTATS:
1695 case DIOCRTSTADDRS:
1696 case DIOCOSFPGET:
1697 case DIOCGETSRCNODES:
1698 case DIOCIGETIFACES:
1699 case DIOCGIFSPEED:
1700 break;
1701 case DIOCRCLRTABLES:
1702 case DIOCRADDTABLES:
1703 case DIOCRDELTABLES:
1704 case DIOCRCLRTSTATS:
1705 case DIOCRCLRADDRS:
1706 case DIOCRADDADDRS:
1707 case DIOCRDELADDRS:
1708 case DIOCRSETADDRS:
1709 case DIOCRSETTFLAGS: {
1710 int pfrio_flags;
1711
1712 bcopy(&((struct pfioc_table *)(void *)addr)->
1713 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1714
1715 if (pfrio_flags & PFR_FLAG_DUMMY) {
1716 flags |= FWRITE; /* need write lock for dummy */
1717 break; /* dummy operation ok */
1718 }
1719 return (EACCES);
1720 }
1721 case DIOCGETRULE: {
1722 u_int32_t action;
1723
1724 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1725 &action, sizeof (action));
1726
1727 if (action == PF_GET_CLR_CNTR)
1728 return (EACCES);
1729 break;
1730 }
1731 default:
1732 return (EACCES);
1733 }
1734
1735 #if PF_ALTQ
1736 switch (cmd) {
1737 case DIOCSTARTALTQ:
1738 case DIOCSTOPALTQ:
1739 case DIOCADDALTQ:
1740 case DIOCGETALTQS:
1741 case DIOCGETALTQ:
1742 case DIOCCHANGEALTQ:
1743 case DIOCGETQSTATS:
1744 /* fail if ALTQ is disabled */
1745 if (!altq_allowed)
1746 return (ENODEV);
1747 break;
1748 }
1749 #endif /* PF_ALTQ */
1750
1751 if (flags & FWRITE)
1752 lck_rw_lock_exclusive(pf_perim_lock);
1753 else
1754 lck_rw_lock_shared(pf_perim_lock);
1755
1756 lck_mtx_lock(pf_lock);
1757
1758 switch (cmd) {
1759
1760 case DIOCSTART:
1761 if (pf_status.running) {
1762 /*
1763 * Increment the reference for a simple -e enable, so
1764 * that even if other processes drop their references,
1765 * pf will still be available to processes that turned
1766 * it on without taking a reference
1767 */
1768 if (nr_tokens == pf_enabled_ref_count) {
1769 pf_enabled_ref_count++;
1770 VERIFY(pf_enabled_ref_count != 0);
1771 }
1772 error = EEXIST;
1773 } else if (pf_purge_thread == NULL) {
1774 error = ENOMEM;
1775 } else {
1776 pf_start();
1777 pf_enabled_ref_count++;
1778 VERIFY(pf_enabled_ref_count != 0);
1779 }
1780 break;
1781
1782 case DIOCSTARTREF: /* u_int64_t */
1783 if (pf_purge_thread == NULL) {
1784 error = ENOMEM;
1785 } else {
1786 u_int64_t token;
1787
1788 /* small enough to be on stack */
1789 if ((token = generate_token(p)) != 0) {
1790 if (pf_is_enabled == 0) {
1791 pf_start();
1792 }
1793 pf_enabled_ref_count++;
1794 VERIFY(pf_enabled_ref_count != 0);
1795 } else {
1796 error = ENOMEM;
1797 DPFPRINTF(PF_DEBUG_URGENT,
1798 ("pf: unable to generate token\n"));
1799 }
1800 bcopy(&token, addr, sizeof (token));
1801 }
1802 break;
1803
1804 case DIOCSTOP:
1805 if (!pf_status.running) {
1806 error = ENOENT;
1807 } else {
1808 pf_stop();
1809 pf_enabled_ref_count = 0;
1810 invalidate_all_tokens();
1811 }
1812 break;
1813
1814 case DIOCSTOPREF: /* struct pfioc_remove_token */
1815 if (!pf_status.running) {
1816 error = ENOENT;
1817 } else {
1818 struct pfioc_remove_token pfrt;
1819
1820 /* small enough to be on stack */
1821 bcopy(addr, &pfrt, sizeof (pfrt));
1822 if ((error = remove_token(&pfrt)) == 0) {
1823 VERIFY(pf_enabled_ref_count != 0);
1824 pf_enabled_ref_count--;
1825 /* return currently held references */
1826 pfrt.refcount = pf_enabled_ref_count;
1827 DPFPRINTF(PF_DEBUG_MISC,
1828 ("pf: enabled refcount decremented\n"));
1829 } else {
1830 error = EINVAL;
1831 DPFPRINTF(PF_DEBUG_URGENT,
1832 ("pf: token mismatch\n"));
1833 }
1834 bcopy(&pfrt, addr, sizeof (pfrt));
1835
1836 if (error == 0 && pf_enabled_ref_count == 0)
1837 pf_stop();
1838 }
1839 break;
1840
1841 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1842 PFIOCX_STRUCT_DECL(pfioc_tokens);
1843
1844 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
1845 error = pfioctl_ioc_tokens(cmd,
1846 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1847 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1848 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1849 break;
1850 }
1851
1852 case DIOCADDRULE: /* struct pfioc_rule */
1853 case DIOCGETRULES: /* struct pfioc_rule */
1854 case DIOCGETRULE: /* struct pfioc_rule */
1855 case DIOCCHANGERULE: /* struct pfioc_rule */
1856 case DIOCINSERTRULE: /* struct pfioc_rule */
1857 case DIOCDELETERULE: { /* struct pfioc_rule */
1858 struct pfioc_rule *pr = NULL;
1859
1860 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
1861 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1862 PFIOC_STRUCT_END(pr, addr);
1863 break;
1864 }
1865
1866 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1867 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1868 struct pfioc_state_kill *psk = NULL;
1869
1870 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
1871 error = pfioctl_ioc_state_kill(cmd, psk, p);
1872 PFIOC_STRUCT_END(psk, addr);
1873 break;
1874 }
1875
1876 case DIOCADDSTATE: /* struct pfioc_state */
1877 case DIOCGETSTATE: { /* struct pfioc_state */
1878 struct pfioc_state *ps = NULL;
1879
1880 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
1881 error = pfioctl_ioc_state(cmd, ps, p);
1882 PFIOC_STRUCT_END(ps, addr);
1883 break;
1884 }
1885
1886 case DIOCGETSTATES: { /* struct pfioc_states */
1887 PFIOCX_STRUCT_DECL(pfioc_states);
1888
1889 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
1890 error = pfioctl_ioc_states(cmd,
1891 PFIOCX_STRUCT_ADDR32(pfioc_states),
1892 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1893 PFIOCX_STRUCT_END(pfioc_states, addr);
1894 break;
1895 }
1896
1897 case DIOCGETSTATUS: { /* struct pf_status */
1898 struct pf_status *s = NULL;
1899
1900 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
1901 pfi_update_status(s->ifname, s);
1902 PFIOC_STRUCT_END(s, addr);
1903 break;
1904 }
1905
1906 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1907 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1908
1909 /* OK for unaligned accesses */
1910 if (pi->ifname[0] == 0) {
1911 bzero(pf_status.ifname, IFNAMSIZ);
1912 break;
1913 }
1914 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1915 break;
1916 }
1917
1918 case DIOCCLRSTATUS: {
1919 bzero(pf_status.counters, sizeof (pf_status.counters));
1920 bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
1921 bzero(pf_status.scounters, sizeof (pf_status.scounters));
1922 pf_status.since = pf_calendar_time_second();
1923 if (*pf_status.ifname)
1924 pfi_update_status(pf_status.ifname, NULL);
1925 break;
1926 }
1927
1928 case DIOCNATLOOK: { /* struct pfioc_natlook */
1929 struct pfioc_natlook *pnl = NULL;
1930
1931 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
1932 error = pfioctl_ioc_natlook(cmd, pnl, p);
1933 PFIOC_STRUCT_END(pnl, addr);
1934 break;
1935 }
1936
1937 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1938 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1939 struct pfioc_tm pt;
1940
1941 /* small enough to be on stack */
1942 bcopy(addr, &pt, sizeof (pt));
1943 error = pfioctl_ioc_tm(cmd, &pt, p);
1944 bcopy(&pt, addr, sizeof (pt));
1945 break;
1946 }
1947
1948 case DIOCGETLIMIT: /* struct pfioc_limit */
1949 case DIOCSETLIMIT: { /* struct pfioc_limit */
1950 struct pfioc_limit pl;
1951
1952 /* small enough to be on stack */
1953 bcopy(addr, &pl, sizeof (pl));
1954 error = pfioctl_ioc_limit(cmd, &pl, p);
1955 bcopy(&pl, addr, sizeof (pl));
1956 break;
1957 }
1958
1959 case DIOCSETDEBUG: { /* u_int32_t */
1960 bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
1961 break;
1962 }
1963
1964 case DIOCCLRRULECTRS: {
1965 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1966 struct pf_ruleset *ruleset = &pf_main_ruleset;
1967 struct pf_rule *rule;
1968
1969 TAILQ_FOREACH(rule,
1970 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1971 rule->evaluations = 0;
1972 rule->packets[0] = rule->packets[1] = 0;
1973 rule->bytes[0] = rule->bytes[1] = 0;
1974 }
1975 break;
1976 }
1977
1978 case DIOCGIFSPEED: {
1979 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1980 struct pf_ifspeed ps;
1981 struct ifnet *ifp;
1982 u_int64_t baudrate;
1983
1984 if (psp->ifname[0] != '\0') {
1985 /* Can we completely trust user-land? */
1986 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1987 ps.ifname[IFNAMSIZ - 1] = '\0';
1988 ifp = ifunit(ps.ifname);
1989 if (ifp != NULL) {
1990 baudrate = ifp->if_output_bw.max_bw;
1991 bcopy(&baudrate, &psp->baudrate,
1992 sizeof (baudrate));
1993 } else {
1994 error = EINVAL;
1995 }
1996 } else {
1997 error = EINVAL;
1998 }
1999 break;
2000 }
2001
2002 #if PF_ALTQ
2003 case DIOCSTARTALTQ: {
2004 struct pf_altq *altq;
2005
2006 VERIFY(altq_allowed);
2007 /* enable all altq interfaces on active list */
2008 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2009 if (altq->qname[0] == '\0') {
2010 error = pf_enable_altq(altq);
2011 if (error != 0)
2012 break;
2013 }
2014 }
2015 if (error == 0)
2016 pf_altq_running = 1;
2017 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2018 break;
2019 }
2020
2021 case DIOCSTOPALTQ: {
2022 struct pf_altq *altq;
2023
2024 VERIFY(altq_allowed);
2025 /* disable all altq interfaces on active list */
2026 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2027 if (altq->qname[0] == '\0') {
2028 error = pf_disable_altq(altq);
2029 if (error != 0)
2030 break;
2031 }
2032 }
2033 if (error == 0)
2034 pf_altq_running = 0;
2035 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2036 break;
2037 }
2038
2039 case DIOCADDALTQ: { /* struct pfioc_altq */
2040 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2041 struct pf_altq *altq, *a;
2042 u_int32_t ticket;
2043
2044 VERIFY(altq_allowed);
2045 bcopy(&pa->ticket, &ticket, sizeof (ticket));
2046 if (ticket != ticket_altqs_inactive) {
2047 error = EBUSY;
2048 break;
2049 }
2050 altq = pool_get(&pf_altq_pl, PR_WAITOK);
2051 if (altq == NULL) {
2052 error = ENOMEM;
2053 break;
2054 }
2055 pf_altq_copyin(&pa->altq, altq);
2056
2057 /*
2058 * if this is for a queue, find the discipline and
2059 * copy the necessary fields
2060 */
2061 if (altq->qname[0] != '\0') {
2062 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2063 error = EBUSY;
2064 pool_put(&pf_altq_pl, altq);
2065 break;
2066 }
2067 altq->altq_disc = NULL;
2068 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2069 if (strncmp(a->ifname, altq->ifname,
2070 IFNAMSIZ) == 0 && a->qname[0] == '\0') {
2071 altq->altq_disc = a->altq_disc;
2072 break;
2073 }
2074 }
2075 }
2076
2077 error = altq_add(altq);
2078 if (error) {
2079 pool_put(&pf_altq_pl, altq);
2080 break;
2081 }
2082
2083 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2084 pf_altq_copyout(altq, &pa->altq);
2085 break;
2086 }
2087
2088 case DIOCGETALTQS: {
2089 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2090 struct pf_altq *altq;
2091 u_int32_t nr;
2092
2093 VERIFY(altq_allowed);
2094 nr = 0;
2095 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2096 nr++;
2097 bcopy(&nr, &pa->nr, sizeof (nr));
2098 bcopy(&ticket_altqs_active, &pa->ticket, sizeof (pa->ticket));
2099 break;
2100 }
2101
2102 case DIOCGETALTQ: {
2103 struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
2104 struct pf_altq *altq;
2105 u_int32_t nr, pa_nr, ticket;
2106
2107 VERIFY(altq_allowed);
2108 bcopy(&pa->ticket, &ticket, sizeof (ticket));
2109 if (ticket != ticket_altqs_active) {
2110 error = EBUSY;
2111 break;
2112 }
2113 bcopy(&pa->nr, &pa_nr, sizeof (pa_nr));
2114 nr = 0;
2115 altq = TAILQ_FIRST(pf_altqs_active);
2116 while ((altq != NULL) && (nr < pa_nr)) {
2117 altq = TAILQ_NEXT(altq, entries);
2118 nr++;
2119 }
2120 if (altq == NULL) {
2121 error = EBUSY;
2122 break;
2123 }
2124 pf_altq_copyout(altq, &pa->altq);
2125 break;
2126 }
2127
2128 case DIOCCHANGEALTQ:
2129 VERIFY(altq_allowed);
2130 /* CHANGEALTQ not supported yet! */
2131 error = ENODEV;
2132 break;
2133
2134 case DIOCGETQSTATS: {
2135 struct pfioc_qstats *pq = (struct pfioc_qstats *)(void *)addr;
2136 struct pf_altq *altq;
2137 u_int32_t nr, pq_nr, ticket;
2138 int nbytes;
2139
2140 VERIFY(altq_allowed);
2141 bcopy(&pq->ticket, &ticket, sizeof (ticket));
2142 if (ticket != ticket_altqs_active) {
2143 error = EBUSY;
2144 break;
2145 }
2146 bcopy(&pq->nr, &pq_nr, sizeof (pq_nr));
2147 nr = 0;
2148 altq = TAILQ_FIRST(pf_altqs_active);
2149 while ((altq != NULL) && (nr < pq_nr)) {
2150 altq = TAILQ_NEXT(altq, entries);
2151 nr++;
2152 }
2153 if (altq == NULL) {
2154 error = EBUSY;
2155 break;
2156 }
2157 bcopy(&pq->nbytes, &nbytes, sizeof (nbytes));
2158 error = altq_getqstats(altq, pq->buf, &nbytes);
2159 if (error == 0) {
2160 pq->scheduler = altq->scheduler;
2161 bcopy(&nbytes, &pq->nbytes, sizeof (nbytes));
2162 }
2163 break;
2164 }
2165 #endif /* PF_ALTQ */
2166
2167 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
2168 case DIOCADDADDR: /* struct pfioc_pooladdr */
2169 case DIOCGETADDRS: /* struct pfioc_pooladdr */
2170 case DIOCGETADDR: /* struct pfioc_pooladdr */
2171 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
2172 struct pfioc_pooladdr *pp = NULL;
2173
2174 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
2175 error = pfioctl_ioc_pooladdr(cmd, pp, p);
2176 PFIOC_STRUCT_END(pp, addr);
2177 break;
2178 }
2179
2180 case DIOCGETRULESETS: /* struct pfioc_ruleset */
2181 case DIOCGETRULESET: { /* struct pfioc_ruleset */
2182 struct pfioc_ruleset *pr = NULL;
2183
2184 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
2185 error = pfioctl_ioc_ruleset(cmd, pr, p);
2186 PFIOC_STRUCT_END(pr, addr);
2187 break;
2188 }
2189
2190 case DIOCRCLRTABLES: /* struct pfioc_table */
2191 case DIOCRADDTABLES: /* struct pfioc_table */
2192 case DIOCRDELTABLES: /* struct pfioc_table */
2193 case DIOCRGETTABLES: /* struct pfioc_table */
2194 case DIOCRGETTSTATS: /* struct pfioc_table */
2195 case DIOCRCLRTSTATS: /* struct pfioc_table */
2196 case DIOCRSETTFLAGS: /* struct pfioc_table */
2197 case DIOCRCLRADDRS: /* struct pfioc_table */
2198 case DIOCRADDADDRS: /* struct pfioc_table */
2199 case DIOCRDELADDRS: /* struct pfioc_table */
2200 case DIOCRSETADDRS: /* struct pfioc_table */
2201 case DIOCRGETADDRS: /* struct pfioc_table */
2202 case DIOCRGETASTATS: /* struct pfioc_table */
2203 case DIOCRCLRASTATS: /* struct pfioc_table */
2204 case DIOCRTSTADDRS: /* struct pfioc_table */
2205 case DIOCRINADEFINE: { /* struct pfioc_table */
2206 PFIOCX_STRUCT_DECL(pfioc_table);
2207
2208 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
2209 error = pfioctl_ioc_table(cmd,
2210 PFIOCX_STRUCT_ADDR32(pfioc_table),
2211 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
2212 PFIOCX_STRUCT_END(pfioc_table, addr);
2213 break;
2214 }
2215
2216 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
2217 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
2218 struct pf_osfp_ioctl *io = NULL;
2219
2220 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
2221 if (cmd == DIOCOSFPADD) {
2222 error = pf_osfp_add(io);
2223 } else {
2224 VERIFY(cmd == DIOCOSFPGET);
2225 error = pf_osfp_get(io);
2226 }
2227 PFIOC_STRUCT_END(io, addr);
2228 break;
2229 }
2230
2231 case DIOCXBEGIN: /* struct pfioc_trans */
2232 case DIOCXROLLBACK: /* struct pfioc_trans */
2233 case DIOCXCOMMIT: { /* struct pfioc_trans */
2234 PFIOCX_STRUCT_DECL(pfioc_trans);
2235
2236 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
2237 error = pfioctl_ioc_trans(cmd,
2238 PFIOCX_STRUCT_ADDR32(pfioc_trans),
2239 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
2240 PFIOCX_STRUCT_END(pfioc_trans, addr);
2241 break;
2242 }
2243
2244 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
2245 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
2246
2247 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
2248 error = ENOMEM; break;);
2249 error = pfioctl_ioc_src_nodes(cmd,
2250 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
2251 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
2252 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
2253 break;
2254 }
2255
2256 case DIOCCLRSRCNODES: {
2257 struct pf_src_node *n;
2258 struct pf_state *state;
2259
2260 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
2261 state->src_node = NULL;
2262 state->nat_src_node = NULL;
2263 }
2264 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2265 n->expire = 1;
2266 n->states = 0;
2267 }
2268 pf_purge_expired_src_nodes();
2269 pf_status.src_nodes = 0;
2270 break;
2271 }
2272
2273 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
2274 struct pfioc_src_node_kill *psnk = NULL;
2275
2276 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
2277 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
2278 PFIOC_STRUCT_END(psnk, addr);
2279 break;
2280 }
2281
2282 case DIOCSETHOSTID: { /* u_int32_t */
2283 u_int32_t hid;
2284
2285 /* small enough to be on stack */
2286 bcopy(addr, &hid, sizeof (hid));
2287 if (hid == 0)
2288 pf_status.hostid = random();
2289 else
2290 pf_status.hostid = hid;
2291 break;
2292 }
2293
2294 case DIOCOSFPFLUSH:
2295 pf_osfp_flush();
2296 break;
2297
2298 case DIOCIGETIFACES: /* struct pfioc_iface */
2299 case DIOCSETIFFLAG: /* struct pfioc_iface */
2300 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
2301 PFIOCX_STRUCT_DECL(pfioc_iface);
2302
2303 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
2304 error = pfioctl_ioc_iface(cmd,
2305 PFIOCX_STRUCT_ADDR32(pfioc_iface),
2306 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
2307 PFIOCX_STRUCT_END(pfioc_iface, addr);
2308 break;
2309 }
2310
2311 default:
2312 error = ENODEV;
2313 break;
2314 }
2315
2316 lck_mtx_unlock(pf_lock);
2317 lck_rw_done(pf_perim_lock);
2318
2319 return (error);
2320 }
2321
/*
 * Handler for the DIOCR* "radix table" ioctls: create/delete/list
 * tables, query and clear table statistics, set table flags, and
 * manipulate the addresses stored in a table.
 *
 * The pfioc_table structure embeds a user-space buffer pointer, so its
 * layout differs between 32-bit and 64-bit callers; the dispatch below
 * is therefore duplicated, with io64 used for 64-bit processes and
 * io32 for 32-bit ones (selected via proc_is64bit()).  Only the
 * pointer matching the caller's ABI is dereferenced.
 *
 * Every case first validates pfrio_esize -- the caller's notion of the
 * element size -- against the kernel structure size and fails with
 * ENODEV on mismatch (guards against ABI/version skew).  The
 * PFR_FLAG_USERIOCTL flag tells the pfr_* backend that the request
 * originates in userland.
 *
 * Returns 0 on success or an errno value.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64)
		goto struct32;

	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		/* no element array involved; esize must be 0 */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		/* sanitize table name/anchor strings copied from userland */
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		/* operates on the named table only; esize must be 0 */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		/* input is a list of pfr_addr, not pfr_astats */
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller (pfioctl) only dispatches the cases above */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;

struct32:
	/*
	 * 32-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		/* no element array involved; esize must be 0 */
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		/* operates on the named table only; esize must be 0 */
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		/* input is a list of pfr_addr, not pfr_astats */
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller (pfioctl) only dispatches the cases above */
		VERIFY(0);
		/* NOTREACHED */
	}

done:
	return (error);
}
2685
/*
 * Handler for the DIOCGETSTARTERS ioctl: copy the list of PF "starter"
 * tokens (one per client holding a PF reference) out to userland.
 *
 * tok32/tok64 are the 32-bit and 64-bit user views of the request;
 * only the one matching the caller's ABI (proc_is64bit()) is used.
 *
 * Protocol: if the caller passes size == 0, only the number of bytes
 * required for a full snapshot is returned in 'size'.  Otherwise up to
 * 'size' bytes of struct pfioc_token records are copied out and 'size'
 * is rewritten to the number of bytes actually transferred.
 *
 * Returns 0 on success, ENOENT if no tokens exist, ENOMEM if the
 * temporary kernel buffer cannot be allocated, or a copyout error.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof (struct pfioc_token) * nr_tokens;
		/* ocnt remembers the caller's buffer size; cnt counts down */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* pure size query: report required bytes and stop */
			if (p64)
				tok64->size = size;
			else
				tok32->size = size;
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* snapshot the token list into the kernel-side buffer */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof (*tokens))
				break; /* no more buffer space left */

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			cnt -= sizeof (struct pfioc_token);
		}

		/* ocnt - cnt == number of bytes actually gathered above */
		if (cnt < ocnt)
			error = copyout(tokens, token_buf, ocnt - cnt);

		if (p64)
			tok64->size = ocnt - cnt;
		else
			tok32->size = ocnt - cnt;

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		/* caller (pfioctl) only dispatches DIOCGETSTARTERS here */
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
2759
2760 static void
2761 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2762 {
2763 struct pf_state *state;
2764 struct pf_src_node *sn;
2765 int killed = 0;
2766
2767 /* expire the states */
2768 state = TAILQ_FIRST(&state_list);
2769 while (state) {
2770 if (state->rule.ptr == rule)
2771 state->timeout = PFTM_PURGE;
2772 state = TAILQ_NEXT(state, entry_list);
2773 }
2774 pf_purge_expired_states(pf_status.states);
2775
2776 /* expire the src_nodes */
2777 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2778 if (sn->rule.ptr != rule)
2779 continue;
2780 if (sn->states != 0) {
2781 RB_FOREACH(state, pf_state_tree_id,
2782 &tree_id) {
2783 if (state->src_node == sn)
2784 state->src_node = NULL;
2785 if (state->nat_src_node == sn)
2786 state->nat_src_node = NULL;
2787 }
2788 sn->states = 0;
2789 }
2790 sn->expire = 1;
2791 killed++;
2792 }
2793 if (killed)
2794 pf_purge_expired_src_nodes();
2795 }
2796
2797 static void
2798 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2799 struct pf_rule *rule)
2800 {
2801 struct pf_rule *r;
2802 int nr = 0;
2803
2804 pf_expire_states_and_src_nodes(rule);
2805
2806 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2807 if (ruleset->rules[rs_num].active.rcount-- == 0)
2808 panic("%s: rcount value broken!", __func__);
2809 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2810
2811 while (r) {
2812 r->nr = nr++;
2813 r = TAILQ_NEXT(r, entries);
2814 }
2815 }
2816
2817
2818 static void
2819 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2820 {
2821 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2822 ruleset->rules[rs].active.ticket =
2823 ++ruleset->rules[rs].inactive.ticket;
2824 }
2825
/*
 * Delete the rule identified by pr->rule.ticket from the ruleset named
 * by pr->anchor, on behalf of pr->rule.owner.
 *
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 *
 * If the deleted rule leaves an unowned anchor empty, the enclosing
 * anchor rule in the parent ruleset is deleted as well, and so on up
 * the anchor chain (the delete_rule: loop below).
 *
 * Returns 0 on success; ENOENT if no rule carries the ticket; EACCES
 * on an owner or device mismatch; or the error reported by the
 * ruleset lookup.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL)
		return (error);

	/* scan every ruleset type for the rule carrying the ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (ENOENT);
	else
		i--; /* undo the final increment: i is the matching rs type */

	if (strcmp(rule->owner, pr->rule.owner))
		return (EACCES);

delete_rule:
	/*
	 * If this rule is an anchor rule inside an unowned anchor that
	 * is about to become empty, delete it and repeat one level up.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset		ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL)
			ruleset = &pf_main_ruleset;
		else
			ruleset = &parent_ruleset;

		/* locate the anchor rule in the parent ruleset */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL)
			panic("%s: rule not found!", __func__);

		/*
		 * if reqest device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return (0);
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
			return EACCES;
		if (rule->rule_flag & PFRULE_PFM)
			pffwrules--;
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return (0);
}
2916
/*
 * Delete every rule owned by 'owner' across all ruleset types,
 * starting at the main ruleset and descending into anchors as they
 * are encountered (anchor rules that are unowned or owned by 'owner'
 * are followed; empty anchors are deleted outright).
 *
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 *
 * Rules whose originating device does not match req_dev are skipped.
 * After one or more deletions in a ruleset, pf_ruleset_cleanup()
 * recomputes skip steps and bumps the ticket before moving on.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish pending cleanup before descending */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM)
							pffwrules--;
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else
					rule = next;
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM)
						pffwrules--;
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/*
			 * End of this ruleset's queue: clean up pending
			 * deletions and, if inside an anchor, resume in
			 * the parent ruleset.
			 */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset)
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
			}
		}
	}
}
2987
2988 static void
2989 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2990 int rs, struct pf_rule **rule_ptr)
2991 {
2992 struct pf_ruleset *ruleset = *ruleset_ptr;
2993 struct pf_rule *rule = *rule_ptr;
2994
2995 /* step out of anchor */
2996 struct pf_ruleset *rs_copy = ruleset;
2997 ruleset = ruleset->anchor->parent?
2998 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2999
3000 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
3001 while (rule && (rule->anchor != rs_copy->anchor))
3002 rule = TAILQ_NEXT(rule, entries);
3003 if (rule == NULL)
3004 panic("%s: parent rule of anchor not found!", __func__);
3005 if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
3006 rule = TAILQ_NEXT(rule, entries);
3007
3008 *ruleset_ptr = ruleset;
3009 *rule_ptr = rule;
3010 }
3011
3012 static void
3013 pf_addrwrap_setup(struct pf_addr_wrap *aw)
3014 {
3015 VERIFY(aw);
3016 bzero(&aw->p, sizeof aw->p);
3017 }
3018
/*
 * Resolve and validate the symbolic references carried by a rule just
 * copied in from userland -- interface, (ALTQ) queues, tags, route
 * labels, dynamic/table addresses, anchor, overload table and address
 * pool -- preparing it for insertion into 'ruleset'.
 *
 * Errors are accumulated in 'error' so every reference that can be
 * taken is taken before cleanup; pf_rm_rule(NULL, rule) then releases
 * them all at once.  The early interface failure uses pool_put()
 * directly because no other references exist yet.
 *
 * Returns 0 on success (with counters zeroed and rpool.cur set) or an
 * errno value, in which case 'rule' has been freed.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
	struct pf_pooladdr *apa;
	int error = 0;

	/* bind the rule to its interface, if one was named */
	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return (EINVAL);
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
#if PF_ALTQ
	/* set queue IDs */
	if (altq_allowed && rule->qname[0] != '\0') {
		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
			error = EBUSY;
		else if (rule->pqname[0] != '\0') {
			if ((rule->pqid =
			    pf_qname2qid(rule->pqname)) == 0)
				error = EBUSY;
		} else
			rule->pqid = rule->qid;
	}
#endif /* PF_ALTQ */
	/* translate tag names to numeric tag ids (takes references) */
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route options require an explicit direction */
	if (rule->rt && !rule->direction)
		error = EINVAL;
#if PFLOG
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	/* resolve src/dst wrappers: route labels, dynaddrs, tables */
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr))
		error = EBUSY;
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
		error = EINVAL;
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
		error = EINVAL;
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
		error = EINVAL;
	/* resolve table references in the staged pool addresses too */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
		if (pf_tbladdr_setup(ruleset, &apa->addr))
			error = EINVAL;

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* move the staged pool addresses onto the rule */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);
	/* NAT/RDR/BINAT outside an anchor, and route-to, need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		/* releases kif, tags, labels, tables, anchor, pool */
		pf_rm_rule(NULL, rule);
		return (error);
	}
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return (0);
}
3106
3107 static int
3108 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
3109 {
3110 int error = 0;
3111 u_int32_t req_dev = 0;
3112
3113 switch (cmd) {
3114 case DIOCADDRULE: {
3115 struct pf_ruleset *ruleset;
3116 struct pf_rule *rule, *tail;
3117 int rs_num;
3118
3119 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3120 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3121 ruleset = pf_find_ruleset(pr->anchor);
3122 if (ruleset == NULL) {
3123 error = EINVAL;
3124 break;
3125 }
3126 rs_num = pf_get_ruleset_number(pr->rule.action);
3127 if (rs_num >= PF_RULESET_MAX) {
3128 error = EINVAL;
3129 break;
3130 }
3131 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3132 error = EINVAL;
3133 break;
3134 }
3135 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
3136 error = EBUSY;
3137 break;
3138 }
3139 if (pr->pool_ticket != ticket_pabuf) {
3140 error = EBUSY;
3141 break;
3142 }
3143 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3144 if (rule == NULL) {
3145 error = ENOMEM;
3146 break;
3147 }
3148 pf_rule_copyin(&pr->rule, rule, p, minordev);
3149 #if !INET
3150 if (rule->af == AF_INET) {
3151 pool_put(&pf_rule_pl, rule);
3152 error = EAFNOSUPPORT;
3153 break;
3154 }
3155 #endif /* INET */
3156 #if !INET6
3157 if (rule->af == AF_INET6) {
3158 pool_put(&pf_rule_pl, rule);
3159 error = EAFNOSUPPORT;
3160 break;
3161 }
3162 #endif /* INET6 */
3163 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
3164 pf_rulequeue);
3165 if (tail)
3166 rule->nr = tail->nr + 1;
3167 else
3168 rule->nr = 0;
3169
3170 if ((error = pf_rule_setup(pr, rule, ruleset)))
3171 break;
3172
3173 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
3174 rule, entries);
3175 ruleset->rules[rs_num].inactive.rcount++;
3176 if (rule->rule_flag & PFRULE_PFM)
3177 pffwrules++;
3178 break;
3179 }
3180
3181 case DIOCGETRULES: {
3182 struct pf_ruleset *ruleset;
3183 struct pf_rule *tail;
3184 int rs_num;
3185
3186 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3187 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3188 ruleset = pf_find_ruleset(pr->anchor);
3189 if (ruleset == NULL) {
3190 error = EINVAL;
3191 break;
3192 }
3193 rs_num = pf_get_ruleset_number(pr->rule.action);
3194 if (rs_num >= PF_RULESET_MAX) {
3195 error = EINVAL;
3196 break;
3197 }
3198 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3199 pf_rulequeue);
3200 if (tail)
3201 pr->nr = tail->nr + 1;
3202 else
3203 pr->nr = 0;
3204 pr->ticket = ruleset->rules[rs_num].active.ticket;
3205 break;
3206 }
3207
3208 case DIOCGETRULE: {
3209 struct pf_ruleset *ruleset;
3210 struct pf_rule *rule;
3211 int rs_num, i;
3212
3213 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3214 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3215 ruleset = pf_find_ruleset(pr->anchor);
3216 if (ruleset == NULL) {
3217 error = EINVAL;
3218 break;
3219 }
3220 rs_num = pf_get_ruleset_number(pr->rule.action);
3221 if (rs_num >= PF_RULESET_MAX) {
3222 error = EINVAL;
3223 break;
3224 }
3225 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3226 error = EBUSY;
3227 break;
3228 }
3229 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3230 while ((rule != NULL) && (rule->nr != pr->nr))
3231 rule = TAILQ_NEXT(rule, entries);
3232 if (rule == NULL) {
3233 error = EBUSY;
3234 break;
3235 }
3236 pf_rule_copyout(rule, &pr->rule);
3237 if (pf_anchor_copyout(ruleset, rule, pr)) {
3238 error = EBUSY;
3239 break;
3240 }
3241 pfi_dynaddr_copyout(&pr->rule.src.addr);
3242 pfi_dynaddr_copyout(&pr->rule.dst.addr);
3243 pf_tbladdr_copyout(&pr->rule.src.addr);
3244 pf_tbladdr_copyout(&pr->rule.dst.addr);
3245 pf_rtlabel_copyout(&pr->rule.src.addr);
3246 pf_rtlabel_copyout(&pr->rule.dst.addr);
3247 for (i = 0; i < PF_SKIP_COUNT; ++i)
3248 if (rule->skip[i].ptr == NULL)
3249 pr->rule.skip[i].nr = -1;
3250 else
3251 pr->rule.skip[i].nr =
3252 rule->skip[i].ptr->nr;
3253
3254 if (pr->action == PF_GET_CLR_CNTR) {
3255 rule->evaluations = 0;
3256 rule->packets[0] = rule->packets[1] = 0;
3257 rule->bytes[0] = rule->bytes[1] = 0;
3258 }
3259 break;
3260 }
3261
3262 case DIOCCHANGERULE: {
3263 struct pfioc_rule *pcr = pr;
3264 struct pf_ruleset *ruleset;
3265 struct pf_rule *oldrule = NULL, *newrule = NULL;
3266 struct pf_pooladdr *pa;
3267 u_int32_t nr = 0;
3268 int rs_num;
3269
3270 if (!(pcr->action == PF_CHANGE_REMOVE ||
3271 pcr->action == PF_CHANGE_GET_TICKET) &&
3272 pcr->pool_ticket != ticket_pabuf) {
3273 error = EBUSY;
3274 break;
3275 }
3276
3277 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3278 pcr->action > PF_CHANGE_GET_TICKET) {
3279 error = EINVAL;
3280 break;
3281 }
3282 pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
3283 pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
3284 ruleset = pf_find_ruleset(pcr->anchor);
3285 if (ruleset == NULL) {
3286 error = EINVAL;
3287 break;
3288 }
3289 rs_num = pf_get_ruleset_number(pcr->rule.action);
3290 if (rs_num >= PF_RULESET_MAX) {
3291 error = EINVAL;
3292 break;
3293 }
3294
3295 if (pcr->action == PF_CHANGE_GET_TICKET) {
3296 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3297 break;
3298 } else {
3299 if (pcr->ticket !=
3300 ruleset->rules[rs_num].active.ticket) {
3301 error = EINVAL;
3302 break;
3303 }
3304 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3305 error = EINVAL;
3306 break;
3307 }
3308 }
3309
3310 if (pcr->action != PF_CHANGE_REMOVE) {
3311 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3312 if (newrule == NULL) {
3313 error = ENOMEM;
3314 break;
3315 }
3316 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3317 #if !INET
3318 if (newrule->af == AF_INET) {
3319 pool_put(&pf_rule_pl, newrule);
3320 error = EAFNOSUPPORT;
3321 break;
3322 }
3323 #endif /* INET */
3324 #if !INET6
3325 if (newrule->af == AF_INET6) {
3326 pool_put(&pf_rule_pl, newrule);
3327 error = EAFNOSUPPORT;
3328 break;
3329 }
3330 #endif /* INET6 */
3331 if (newrule->ifname[0]) {
3332 newrule->kif = pfi_kif_get(newrule->ifname);
3333 if (newrule->kif == NULL) {
3334 pool_put(&pf_rule_pl, newrule);
3335 error = EINVAL;
3336 break;
3337 }
3338 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3339 } else
3340 newrule->kif = NULL;
3341
3342 #if PF_ALTQ
3343 /* set queue IDs */
3344 if (altq_allowed && newrule->qname[0] != '\0') {
3345 if ((newrule->qid =
3346 pf_qname2qid(newrule->qname)) == 0)
3347 error = EBUSY;
3348 else if (newrule->pqname[0] != '\0') {
3349 if ((newrule->pqid =
3350 pf_qname2qid(newrule->pqname)) == 0)
3351 error = EBUSY;
3352 } else
3353 newrule->pqid = newrule->qid;
3354 }
3355 #endif /* PF_ALTQ */
3356 if (newrule->tagname[0])
3357 if ((newrule->tag =
3358 pf_tagname2tag(newrule->tagname)) == 0)
3359 error = EBUSY;
3360 if (newrule->match_tagname[0])
3361 if ((newrule->match_tag = pf_tagname2tag(
3362 newrule->match_tagname)) == 0)
3363 error = EBUSY;
3364 if (newrule->rt && !newrule->direction)
3365 error = EINVAL;
3366 #if PFLOG
3367 if (!newrule->log)
3368 newrule->logif = 0;
3369 if (newrule->logif >= PFLOGIFS_MAX)
3370 error = EINVAL;
3371 #endif /* PFLOG */
3372 pf_addrwrap_setup(&newrule->src.addr);
3373 pf_addrwrap_setup(&newrule->dst.addr);
3374 if (pf_rtlabel_add(&newrule->src.addr) ||
3375 pf_rtlabel_add(&newrule->dst.addr))
3376 error = EBUSY;
3377 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
3378 error = EINVAL;
3379 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
3380 error = EINVAL;
3381 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
3382 error = EINVAL;
3383 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
3384 error = EINVAL;
3385 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
3386 error = EINVAL;
3387 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3388 if (pf_tbladdr_setup(ruleset, &pa->addr))
3389 error = EINVAL;
3390
3391 if (newrule->overload_tblname[0]) {
3392 if ((newrule->overload_tbl = pfr_attach_table(
3393 ruleset, newrule->overload_tblname)) ==
3394 NULL)
3395 error = EINVAL;
3396 else
3397 newrule->overload_tbl->pfrkt_flags |=
3398 PFR_TFLAG_ACTIVE;
3399 }
3400
3401 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3402 if (((((newrule->action == PF_NAT) ||
3403 (newrule->action == PF_RDR) ||
3404 (newrule->action == PF_BINAT) ||
3405 (newrule->rt > PF_FASTROUTE)) &&
3406 !newrule->anchor)) &&
3407 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3408 error = EINVAL;
3409
3410 if (error) {
3411 pf_rm_rule(NULL, newrule);
3412 break;
3413 }
3414 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3415 newrule->evaluations = 0;
3416 newrule->packets[0] = newrule->packets[1] = 0;
3417 newrule->bytes[0] = newrule->bytes[1] = 0;
3418 }
3419 pf_empty_pool(&pf_pabuf);
3420
3421 if (pcr->action == PF_CHANGE_ADD_HEAD)
3422 oldrule = TAILQ_FIRST(
3423 ruleset->rules[rs_num].active.ptr);
3424 else if (pcr->action == PF_CHANGE_ADD_TAIL)
3425 oldrule = TAILQ_LAST(
3426 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3427 else {
3428 oldrule = TAILQ_FIRST(
3429 ruleset->rules[rs_num].active.ptr);
3430 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3431 oldrule = TAILQ_NEXT(oldrule, entries);
3432 if (oldrule == NULL) {
3433 if (newrule != NULL)
3434 pf_rm_rule(NULL, newrule);
3435 error = EINVAL;
3436 break;
3437 }
3438 }
3439
3440 if (pcr->action == PF_CHANGE_REMOVE) {
3441 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3442 ruleset->rules[rs_num].active.rcount--;
3443 } else {
3444 if (oldrule == NULL)
3445 TAILQ_INSERT_TAIL(
3446 ruleset->rules[rs_num].active.ptr,
3447 newrule, entries);
3448 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3449 pcr->action == PF_CHANGE_ADD_BEFORE)
3450 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3451 else
3452 TAILQ_INSERT_AFTER(
3453 ruleset->rules[rs_num].active.ptr,
3454 oldrule, newrule, entries);
3455 ruleset->rules[rs_num].active.rcount++;
3456 }
3457
3458 nr = 0;
3459 TAILQ_FOREACH(oldrule,
3460 ruleset->rules[rs_num].active.ptr, entries)
3461 oldrule->nr = nr++;
3462
3463 ruleset->rules[rs_num].active.ticket++;
3464
3465 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3466 pf_remove_if_empty_ruleset(ruleset);
3467
3468 break;
3469 }
3470
3471 case DIOCINSERTRULE: {
3472 struct pf_ruleset *ruleset;
3473 struct pf_rule *rule, *tail, *r;
3474 int rs_num;
3475 int is_anchor;
3476
3477 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3478 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3479 is_anchor = (pr->anchor_call[0] != '\0');
3480
3481 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3482 pr->rule.owner, is_anchor, &error)) == NULL)
3483 break;
3484
3485 rs_num = pf_get_ruleset_number(pr->rule.action);
3486 if (rs_num >= PF_RULESET_MAX) {
3487 error = EINVAL;
3488 break;
3489 }
3490 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3491 error = EINVAL;
3492 break;
3493 }
3494
3495 /* make sure this anchor rule doesn't exist already */
3496 if (is_anchor) {
3497 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3498 while (r) {
3499 if (r->anchor &&
3500 ((strcmp(r->anchor->name,
3501 pr->anchor_call)) == 0)) {
3502 if (((strcmp(pr->rule.owner,
3503 r->owner)) == 0) ||
3504 ((strcmp(r->owner, "")) == 0))
3505 error = EEXIST;
3506 else
3507 error = EPERM;
3508 break;
3509 }
3510 r = TAILQ_NEXT(r, entries);
3511 }
3512 if (error != 0)
3513 return (error);
3514 }
3515
3516 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3517 if (rule == NULL) {
3518 error = ENOMEM;
3519 break;
3520 }
3521 pf_rule_copyin(&pr->rule, rule, p, minordev);
3522 #if !INET
3523 if (rule->af == AF_INET) {
3524 pool_put(&pf_rule_pl, rule);
3525 error = EAFNOSUPPORT;
3526 break;
3527 }
3528 #endif /* INET */
3529 #if !INET6
3530 if (rule->af == AF_INET6) {
3531 pool_put(&pf_rule_pl, rule);
3532 error = EAFNOSUPPORT;
3533 break;
3534 }
3535
3536 #endif /* INET6 */
3537 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3538 while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
3539 r = TAILQ_NEXT(r, entries);
3540 if (r == NULL) {
3541 if ((tail =
3542 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3543 pf_rulequeue)) != NULL)
3544 rule->nr = tail->nr + 1;
3545 else
3546 rule->nr = 0;
3547 } else {
3548 rule->nr = r->nr;
3549 }
3550
3551 if ((error = pf_rule_setup(pr, rule, ruleset)))
3552 break;
3553
3554 if (rule->anchor != NULL)
3555 strlcpy(rule->anchor->owner, rule->owner,
3556 PF_OWNER_NAME_SIZE);
3557
3558 if (r) {
3559 TAILQ_INSERT_BEFORE(r, rule, entries);
3560 while (r && ++r->nr)
3561 r = TAILQ_NEXT(r, entries);
3562 } else
3563 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3564 rule, entries);
3565 ruleset->rules[rs_num].active.rcount++;
3566
3567 /* Calculate checksum for the main ruleset */
3568 if (ruleset == &pf_main_ruleset)
3569 error = pf_setup_pfsync_matching(ruleset);
3570
3571 pf_ruleset_cleanup(ruleset, rs_num);
3572 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3573
3574 pr->rule.ticket = rule->ticket;
3575 pf_rule_copyout(rule, &pr->rule);
3576 if (rule->rule_flag & PFRULE_PFM)
3577 pffwrules++;
3578 break;
3579 }
3580
3581 case DIOCDELETERULE: {
3582 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3583 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3584
3585 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3586 error = EINVAL;
3587 break;
3588 }
3589
3590 /* get device through which request is made */
3591 if ((uint8_t)minordev == PFDEV_PFM)
3592 req_dev |= PFRULE_PFM;
3593
3594 if (pr->rule.ticket) {
3595 if ((error = pf_delete_rule_by_ticket(pr, req_dev)))
3596 break;
3597 } else
3598 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3599 pr->nr = pffwrules;
3600 break;
3601 }
3602
3603 default:
3604 VERIFY(0);
3605 /* NOTREACHED */
3606 }
3607
3608 return (error);
3609 }
3610
/*
 * Handle the bulk state-removal ioctls:
 *
 *  DIOCCLRSTATES  - unlink every state whose interface name and rule
 *                   owner match the (optional) filters in *psk.
 *  DIOCKILLSTATES - unlink only states that additionally match the
 *                   address family, protocol, negatable src/dst
 *                   address masks and ports supplied in *psk.
 *
 * In both cases the count of removed states is returned to userland in
 * psk->psk_af (the field is reused as an output counter).  `cmd' must
 * be one of the two ioctls above; anything else panics via VERIFY.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* Defensively NUL-terminate the user-supplied filter strings. */
	psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		/*
		 * Walk the id tree; the successor is fetched before the
		 * current node may be unlinked below.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule cannot satisfy an owner filter. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the "states killed" return value. */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule cannot satisfy an owner filter. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Map the state key's lan/ext sides onto the
			 * caller's notion of source/destination, which
			 * depends on the state's direction.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			/* Zero af/proto act as wildcards. */
			if ((!psk->psk_af || sk->af == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the "states killed" return value. */
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3737
/*
 * Handle single-state ioctls:
 *
 *  DIOCADDSTATE - build a kernel pf_state from the pfsync_state image
 *                 supplied by userland and insert it into the state
 *                 tables.
 *  DIOCGETSTATE - look up a state by (id, creatorid) and export it
 *                 back to userland in pfsync_state format.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* Reject timeout indices outside the timeout table. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		/* Allocates a state key and attaches it to s. */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only `s' is returned to its pool on
			 * this path -- presumably pf_insert_state disposes
			 * of the attached state key on failure; verify.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		/* Counter must not wrap back to zero. */
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* Lookup key is the 64-bit state id plus creator id. */
		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3811
/*
 * DIOCGETSTATES: copy the global state list out to userland.
 *
 * The 32-/64-bit variants of struct pfioc_states differ in their
 * pointer field, so both are passed in and one is selected based on
 * the caller's ABI (proc_is64bit).  A zero ps_len acts as a size
 * probe: the space needed for all current states is reported without
 * copying anything.  Otherwise states are exported one at a time
 * through a single kernel bounce buffer until the list ends or the
 * user buffer is full, and ps_len is set to the byte count written.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: { /* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* Size probe: report space needed for all states. */
			size = sizeof (struct pfsync_state) * pf_status.states;
			if (p64)
				ps64->ps_len = size;
			else
				ps32->ps_len = size;
			break;
		}

		/* One bounce buffer, reused for every exported state. */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet freed. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop before overflowing the user buffer. */
				if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof (*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof (*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Report the number of bytes actually copied out. */
		size = sizeof (struct pfsync_state) * nr;
		if (p64)
			ps64->ps_len = size;
		else
			ps32->ps_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
3880
/*
 * DIOCNATLOOK: given the (af, proto, source, destination) of a
 * connection, find the matching state entry and report the translated
 * (post-NAT) addresses and ports back to userland in pnl->rsaddr /
 * pnl->rdaddr and the corresponding xport fields.
 *
 * Returns EINVAL for an under-specified query, E2BIG if more than one
 * state matches, ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Protocol, both addresses and (for TCP/UDP) both ports
		 * are mandatory; a wildcard lookup is not supported.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->dxport,
				    sizeof (key.ext.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof (key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof (key.lan.xport));
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
				memcpy(&key.ext.xport, &pnl->sxport,
				    sizeof (key.ext.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated side; the side the
				 * caller already knows is echoed back.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof (pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof (pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof (pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof (pnl->rsxport));
				}
			} else
				error = ENOENT;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3965
3966 static int
3967 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3968 {
3969 #pragma unused(p)
3970 int error = 0;
3971
3972 switch (cmd) {
3973 case DIOCSETTIMEOUT: {
3974 int old;
3975
3976 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3977 pt->seconds < 0) {
3978 error = EINVAL;
3979 goto fail;
3980 }
3981 old = pf_default_rule.timeout[pt->timeout];
3982 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3983 pt->seconds = 1;
3984 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3985 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3986 wakeup(pf_purge_thread_fn);
3987 pt->seconds = old;
3988 break;
3989 }
3990
3991 case DIOCGETTIMEOUT: {
3992 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3993 error = EINVAL;
3994 goto fail;
3995 }
3996 pt->seconds = pf_default_rule.timeout[pt->timeout];
3997 break;
3998 }
3999
4000 default:
4001 VERIFY(0);
4002 /* NOTREACHED */
4003 }
4004 fail:
4005 return (error);
4006 }
4007
4008 static int
4009 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
4010 {
4011 #pragma unused(p)
4012 int error = 0;
4013
4014 switch (cmd) {
4015 case DIOCGETLIMIT: {
4016
4017 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
4018 error = EINVAL;
4019 goto fail;
4020 }
4021 pl->limit = pf_pool_limits[pl->index].limit;
4022 break;
4023 }
4024
4025 case DIOCSETLIMIT: {
4026 int old_limit;
4027
4028 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
4029 pf_pool_limits[pl->index].pp == NULL) {
4030 error = EINVAL;
4031 goto fail;
4032 }
4033 pool_sethardlimit(pf_pool_limits[pl->index].pp,
4034 pl->limit, NULL, 0);
4035 old_limit = pf_pool_limits[pl->index].limit;
4036 pf_pool_limits[pl->index].limit = pl->limit;
4037 pl->limit = old_limit;
4038 break;
4039 }
4040
4041 default:
4042 VERIFY(0);
4043 /* NOTREACHED */
4044 }
4045 fail:
4046 return (error);
4047 }
4048
/*
 * Pool-address ioctls.  The pf_pabuf staging list collects addresses
 * between DIOCBEGINADDRS and the rule-install ioctls; ticket_pabuf
 * guards against interleaved transactions.
 *
 *  DIOCBEGINADDRS - flush the staging list and issue a new ticket.
 *  DIOCADDADDR    - validate and append one address to the staging
 *                   list (ticket must match).
 *  DIOCGETADDRS   - count the addresses of an installed rule's pool.
 *  DIOCGETADDR    - export the pp->nr'th address of such a pool.
 *  DIOCCHANGEADDR - insert, reposition or remove one address in an
 *                   installed rule's pool.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* Start a fresh staging transaction. */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		/* Defensively NUL-terminate the user-supplied anchor. */
		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		/* Only concrete address kinds may appear in a pool. */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* Bind the address to its kernel interface. */
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* Unwind the kif reference taken above. */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* Return in pp->nr the size of the addressed rule's pool. */
		pp->nr = 0;
		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* Advance to the pp->nr'th entry of the pool list. */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof (pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* Build and validate the replacement entry. */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* Unwind partially-initialized entry. */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* Locate the reference entry for the requested action. */
		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			/* ADD_BEFORE/ADD_AFTER/REMOVE address by index. */
			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/*
		 * NOTE(review): if PF_CHANGE_REMOVE just emptied the list,
		 * pool->cur is NULL and the PF_ACPY below dereferences it --
		 * presumably an empty pool cannot be reached here; verify.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
4274
4275 static int
4276 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4277 {
4278 #pragma unused(p)
4279 int error = 0;
4280
4281 switch (cmd) {
4282 case DIOCGETRULESETS: {
4283 struct pf_ruleset *ruleset;
4284 struct pf_anchor *anchor;
4285
4286 pr->path[sizeof (pr->path) - 1] = '\0';
4287 pr->name[sizeof (pr->name) - 1] = '\0';
4288 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4289 error = EINVAL;
4290 break;
4291 }
4292 pr->nr = 0;
4293 if (ruleset->anchor == NULL) {
4294 /* XXX kludge for pf_main_ruleset */
4295 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4296 if (anchor->parent == NULL)
4297 pr->nr++;
4298 } else {
4299 RB_FOREACH(anchor, pf_anchor_node,
4300 &ruleset->anchor->children)
4301 pr->nr++;
4302 }
4303 break;
4304 }
4305
4306 case DIOCGETRULESET: {
4307 struct pf_ruleset *ruleset;
4308 struct pf_anchor *anchor;
4309 u_int32_t nr = 0;
4310
4311 pr->path[sizeof (pr->path) - 1] = '\0';
4312 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4313 error = EINVAL;
4314 break;
4315 }
4316 pr->name[0] = 0;
4317 if (ruleset->anchor == NULL) {
4318 /* XXX kludge for pf_main_ruleset */
4319 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4320 if (anchor->parent == NULL && nr++ == pr->nr) {
4321 strlcpy(pr->name, anchor->name,
4322 sizeof (pr->name));
4323 break;
4324 }
4325 } else {
4326 RB_FOREACH(anchor, pf_anchor_node,
4327 &ruleset->anchor->children)
4328 if (nr++ == pr->nr) {
4329 strlcpy(pr->name, anchor->name,
4330 sizeof (pr->name));
4331 break;
4332 }
4333 }
4334 if (!pr->name[0])
4335 error = EBUSY;
4336 break;
4337 }
4338
4339 default:
4340 VERIFY(0);
4341 /* NOTREACHED */
4342 }
4343
4344 return (error);
4345 }
4346
/*
 * Transaction ioctls (DIOCXBEGIN / DIOCXROLLBACK / DIOCXCOMMIT).
 *
 * Userland supplies an array of struct pfioc_trans_e elements, one per
 * ruleset/table/altq component in the transaction.  The 32-/64-bit
 * ioctl structures differ in their array pointer, so both are passed
 * in and one is selected based on the caller's ABI (proc_is64bit).
 * The element size is checked against the kernel's struct so a
 * mismatched userland aborts with ENODEV.
 *
 *  DIOCXBEGIN    - open an inactive copy of each component and return
 *                  a ticket per element (copied back out to userland).
 *  DIOCXROLLBACK - discard the inactive copies named by the tickets.
 *  DIOCXCOMMIT   - two passes: first validate every ticket, then swap
 *                  all inactive copies into place, so the commit is
 *                  all-or-nothing with respect to stale tickets.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* Guard against a userland/kernel struct size mismatch. */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq is global: no anchors. */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_begin_altq(&ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail;
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* Hand the freshly issued ticket back to userland. */
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* Guard against a userland/kernel struct size mismatch. */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq is global: no anchors. */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					error = pf_rollback_altq(ioe->ticket);
					if (error != 0) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						goto fail; /* really bad */
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;	/* saved so pass two can rescan */
		int i;

		/* Guard against a userland/kernel struct size mismatch. */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed) {
					/* altq is global: no anchors. */
					if (ioe->anchor[0]) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EINVAL;
						goto fail;
					}
					if (!altqs_inactive_open ||
					    ioe->ticket !=
					    ticket_altqs_inactive) {
						_FREE(table, M_TEMP);
						_FREE(ioe, M_TEMP);
						error = EBUSY;
						goto fail;
					}
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
#if PF_ALTQ
				if (altq_allowed &&
				    (error = pf_commit_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
#endif /* PF_ALTQ */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
4622
/*
 * Handle DIOCGETSRCNODES: dump the source-tracking node tree to a
 * user-supplied buffer, using the 32- or 64-bit ioctl layout depending
 * on the calling process.  NOTE(review): appears to rely on the caller
 * (pfioctl) holding the PF lock across the tree walk — confirm.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		/*
		 * A zero-length buffer is a size probe: report how many
		 * bytes a full dump would need and return without copying.
		 */
		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;

			size = sizeof (struct pf_src_node) * nr;
			if (p64)
				psn64->psn_len = size;
			else
				psn32->psn_len = size;
			break;
		}

		/* scratch record, reused for every copyout below */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the user buffer can't hold another node */
			if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			/* export creation/expire as seconds relative to now */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			/* scrub kernel pointers before copying to user space */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}

		/* report the number of bytes actually copied out */
		size = sizeof (struct pf_src_node) * nr;
		if (p64)
			psn64->psn_len = size;
		else
			psn32->psn_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);

}
4712
4713 static int
4714 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4715 struct proc *p)
4716 {
4717 #pragma unused(p)
4718 int error = 0;
4719
4720 switch (cmd) {
4721 case DIOCKILLSRCNODES: {
4722 struct pf_src_node *sn;
4723 struct pf_state *s;
4724 int killed = 0;
4725
4726 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4727 if (PF_MATCHA(psnk->psnk_src.neg,
4728 &psnk->psnk_src.addr.v.a.addr,
4729 &psnk->psnk_src.addr.v.a.mask,
4730 &sn->addr, sn->af) &&
4731 PF_MATCHA(psnk->psnk_dst.neg,
4732 &psnk->psnk_dst.addr.v.a.addr,
4733 &psnk->psnk_dst.addr.v.a.mask,
4734 &sn->raddr, sn->af)) {
4735 /* Handle state to src_node linkage */
4736 if (sn->states != 0) {
4737 RB_FOREACH(s, pf_state_tree_id,
4738 &tree_id) {
4739 if (s->src_node == sn)
4740 s->src_node = NULL;
4741 if (s->nat_src_node == sn)
4742 s->nat_src_node = NULL;
4743 }
4744 sn->states = 0;
4745 }
4746 sn->expire = 1;
4747 killed++;
4748 }
4749 }
4750
4751 if (killed > 0)
4752 pf_purge_expired_src_nodes();
4753
4754 psnk->psnk_af = killed;
4755 break;
4756 }
4757
4758 default:
4759 VERIFY(0);
4760 /* NOTREACHED */
4761 }
4762
4763 return (error);
4764 }
4765
4766 static int
4767 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4768 struct pfioc_iface_64 *io64, struct proc *p)
4769 {
4770 int p64 = proc_is64bit(p);
4771 int error = 0;
4772
4773 switch (cmd) {
4774 case DIOCIGETIFACES: {
4775 user_addr_t buf;
4776 int esize;
4777
4778 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4779 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4780
4781 /* esize must be that of the user space version of pfi_kif */
4782 if (esize != sizeof (struct pfi_uif)) {
4783 error = ENODEV;
4784 break;
4785 }
4786 if (p64)
4787 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4788 else
4789 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4790 error = pfi_get_ifaces(
4791 p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4792 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4793 break;
4794 }
4795
4796 case DIOCSETIFFLAG: {
4797 if (p64)
4798 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4799 else
4800 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4801
4802 error = pfi_set_flags(
4803 p64 ? io64->pfiio_name : io32->pfiio_name,
4804 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4805 break;
4806 }
4807
4808 case DIOCCLRIFFLAG: {
4809 if (p64)
4810 io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
4811 else
4812 io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
4813
4814 error = pfi_clear_flags(
4815 p64 ? io64->pfiio_name : io32->pfiio_name,
4816 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4817 break;
4818 }
4819
4820 default:
4821 VERIFY(0);
4822 /* NOTREACHED */
4823 }
4824
4825 return (error);
4826 }
4827
/*
 * PF entry point from the IP input/output paths.  Detaches the lead
 * packet of *mp from its chain, runs the per-family PF hook on it,
 * then re-links whatever survived back into the chain and fixes up the
 * previous packet's linkage (*mppn).  Returns 0 on pass, or an errno
 * from the per-family hook when the packet was dropped.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* marks is net_thread_marks_none when this thread already holds PF */
	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled)
			goto done;	/* mutex not taken yet: drop rwlock only */
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	/* detach the lead packet; the remainder is re-linked below */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL)
			m = m->m_nextpkt;
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL)
			*mppn = *mp;
		else
			*mppn = nextpkt;
	}

	if (marks != net_thread_marks_none)
		lck_mtx_unlock(pf_lock);

done:
	if (marks != net_thread_marks_none)
		lck_rw_done(pf_perim_lock);

	net_thread_marks_pop(marks);
	return (error);
}
4908
4909
4910 #if INET
/*
 * Run PF on one IPv4 packet.  Returns 0 if the packet passed,
 * EHOSTUNREACH if PF rejected it (the mbuf is freed and *mp zeroed),
 * or ENOBUFS when *mp is already NULL after pf_test.
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/* ip_len/ip_off are swapped to network order around pf_test */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF rejected the packet: drop it here */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* *mp already taken by pf_test — presumably consumed */
			error = ENOBUFS;
		}
	} 
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* *mp may differ from m now; re-derive the header pointer */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
4963 #endif /* INET */
4964
4965 #if INET6
4966 int
4967 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4968 struct ip_fw_args *fwa)
4969 {
4970 int error = 0;
4971
4972 /*
4973 * If the packet is outbound, is originated locally, is flagged for
4974 * delayed UDP/TCP checksum calculation, and is about to be processed
4975 * for an interface that doesn't support the appropriate checksum
4976 * offloading, then calculated the checksum here so that PF can adjust
4977 * it properly.
4978 */
4979 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4980 static const int mask = CSUM_DELAY_IPV6_DATA;
4981 const int flags = (*mp)->m_pkthdr.csum_flags &
4982 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4983
4984 if (flags & mask) {
4985 /*
4986 * Checksum offload should not have been enabled
4987 * when extension headers exist, thus 0 for optlen.
4988 */
4989 in6_delayed_cksum(*mp);
4990 (*mp)->m_pkthdr.csum_flags &= ~mask;
4991 }
4992 }
4993
4994 if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4995 if (*mp != NULL) {
4996 m_freem(*mp);
4997 *mp = NULL;
4998 error = EHOSTUNREACH;
4999 } else {
5000 error = ENOBUFS;
5001 }
5002 }
5003 return (error);
5004 }
5005 #endif /* INET6 */
5006
5007 int
5008 pf_ifaddr_hook(struct ifnet *ifp)
5009 {
5010 struct pfi_kif *kif = ifp->if_pf_kif;
5011
5012 if (kif != NULL) {
5013 lck_rw_lock_shared(pf_perim_lock);
5014 lck_mtx_lock(pf_lock);
5015
5016 pfi_kifaddr_update(kif);
5017
5018 lck_mtx_unlock(pf_lock);
5019 lck_rw_done(pf_perim_lock);
5020 }
5021 return (0);
5022 }
5023
5024 /*
5025 * Caller acquires dlil lock as writer (exclusive)
5026 */
5027 void
5028 pf_ifnet_hook(struct ifnet *ifp, int attach)
5029 {
5030 lck_rw_lock_shared(pf_perim_lock);
5031 lck_mtx_lock(pf_lock);
5032 if (attach)
5033 pfi_attach_ifnet(ifp);
5034 else
5035 pfi_detach_ifnet(ifp);
5036 lck_mtx_unlock(pf_lock);
5037 lck_rw_done(pf_perim_lock);
5038 }
5039
5040 static void
5041 pf_attach_hooks(void)
5042 {
5043 ifnet_head_lock_shared();
5044 /*
5045 * Check against ifnet_addrs[] before proceeding, in case this
5046 * is called very early on, e.g. during dlil_init() before any
5047 * network interface is attached.
5048 */
5049 if (ifnet_addrs != NULL) {
5050 int i;
5051
5052 for (i = 0; i <= if_index; i++) {
5053 struct ifnet *ifp = ifindex2ifnet[i];
5054 if (ifp != NULL) {
5055 pfi_attach_ifnet(ifp);
5056 }
5057 }
5058 }
5059 ifnet_head_done();
5060 }
5061
#if 0
/* currently unused along with pfdetach() */
/*
 * Detach PF state from every interface that has a kif attached.
 *
 * Fix: the loop index was declared inside the loop body ("int i;"
 * *after* its first use in the for-statement), which would fail to
 * compile the moment this #if 0 block is re-enabled; the declaration
 * now precedes the loop, mirroring pf_attach_hooks().
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];

			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
5081
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime; it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) made available by the
 * networking stack are unique. This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failure at compile time indicates duplicated ioctl values.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * Two DIOC* macros expanding to the same value would produce a
	 * "duplicate case value" error here.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}