]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pf_ioctl.c
62a475f9d2b4416c315083ceadfb16242f43aed4
[apple/xnu.git] / bsd / net / pf_ioctl.c
1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83
84 #include <mach/vm_param.h>
85
86 #include <net/dlil.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/net_api_stats.h>
90 #include <net/route.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/ip_var.h>
97 #include <netinet/ip_icmp.h>
98 #include <netinet/if_ether.h>
99
100 #if DUMMYNET
101 #include <netinet/ip_dummynet.h>
102 #else
103 struct ip_fw_args;
104 #endif /* DUMMYNET */
105
106 #include <libkern/crypto/md5.h>
107
108 #include <machine/machine_routines.h>
109
110 #include <miscfs/devfs/devfs.h>
111
112 #include <net/pfvar.h>
113
114 #if NPFSYNC
115 #include <net/if_pfsync.h>
116 #endif /* NPFSYNC */
117
118 #if PFLOG
119 #include <net/if_pflog.h>
120 #endif /* PFLOG */
121
122 #if INET6
123 #include <netinet/ip6.h>
124 #include <netinet/in_pcb.h>
125 #endif /* INET6 */
126
127 #include <dev/random/randomdev.h>
128
/*
 * Forward declarations.
 *
 * pfioctl() fans each DIOC* command out to one of the pfioctl_ioc_*
 * helpers below; the *_32 / *_64 structure pairs carry the 32-bit and
 * 64-bit user-space layouts of the same ioctl argument.
 */
#if 0
static void pfdetach(void);
#endif
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
    struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
    struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
    struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
    struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
    struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
    struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
    struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
    struct pfioc_iface_64 *, struct proc *);
/* ruleset / pool / rule manipulation helpers shared by the ioctl handlers */
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
    int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
    int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
    struct pf_ruleset *);
static void pf_delete_rule_by_owner(char *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
    int, struct pf_rule **);
/* -1 asks cdevsw_add() to pick a free major number for us */
#define PF_CDEV_MAJOR	(-1)

/*
 * Character device switch for /dev/pf and /dev/pfm.  Only open, close
 * and ioctl are implemented; every other entry point returns an error
 * via the eno_* stubs.
 */
static struct cdevsw pf_cdevsw = {
	/* open */	pfopen,
	/* close */	pfclose,
	/* read */	eno_rdwrt,
	/* write */	eno_rdwrt,
	/* ioctl */	pfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	eno_select,
	/* mmap */	eno_mmap,
	/* strategy */	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};
207
static void pf_attach_hooks(void);
#if 0
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
#endif

/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

u_int32_t pf_hash_seed;			/* seeded from RandomULong() in pfinit() */
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
static u_int64_t pf_enabled_ref_count;	/* outstanding DIOCSTARTREF holders */
static u_int32_t nr_tokens = 0;		/* entries on token_list_head */
static u_int64_t pffwrules;		/* count of PFRULE_PFM firewall rules */
static u_int32_t pfdevcnt;		/* /dev/pfm open count (0 or 1) */

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule pf_default_rule;		/* pass-all fallback, set up in pfinit() */

/* upper bound for tag ids handed out by tagname2tag() */
#define TAGID_MAX	50000
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t tagname2tag(struct pf_tags *, char *);
static void tag2tagname(struct pf_tags *, u_int16_t, char *);
static void tag_unref(struct pf_tags *, u_int16_t);
static int pf_rtlabel_add(struct pf_addr_wrap *);
static void pf_rtlabel_remove(struct pf_addr_wrap *);
static void pf_rtlabel_copyout(struct pf_addr_wrap *);

#if INET
static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
#endif /* INET */
#if INET6
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
#endif /* INET6 */

/* debug printf gated on the pf_status.debug level */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
261
/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit).
 * A union of both layouts is heap-allocated, the user copy selected by the
 * caller-visible flag `p64' is copied in at BEGIN and back out at END.
 */
#define PFIOCX_STRUCT_DECL(s)						\
struct {								\
	union {								\
		struct s##_32	_s##_32;				\
		struct s##_64	_s##_64;				\
	} _u;								\
} *s##_un = NULL						\

/* allocate the union and copy in the user structure; runs _action on OOM */
#define PFIOCX_STRUCT_BEGIN(a, s, _action) {				\
	VERIFY(s##_un == NULL);						\
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);	\
	if (s##_un == NULL) {						\
		_action							\
	} else {							\
		if (p64)						\
			bcopy(a, &s##_un->_u._s##_64,			\
			    sizeof (struct s##_64));			\
		else							\
			bcopy(a, &s##_un->_u._s##_32,			\
			    sizeof (struct s##_32));			\
	}								\
}

/* copy the (possibly updated) structure back out and free the union */
#define PFIOCX_STRUCT_END(s, a) {					\
	VERIFY(s##_un != NULL);						\
	if (p64)							\
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));	\
	else								\
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));	\
	_FREE(s##_un, M_TEMP);						\
	s##_un = NULL;							\
}

#define PFIOCX_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
#define PFIOC_STRUCT_BEGIN(a, v, _action) {				\
	VERIFY((v) == NULL);						\
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);		\
	if ((v) == NULL) {						\
		_action							\
	} else {							\
		bcopy(a, v, sizeof (*(v)));				\
	}								\
}

#define PFIOC_STRUCT_END(v, a) {					\
	VERIFY((v) != NULL);						\
	bcopy(v, a, sizeof (*(v)));					\
	_FREE(v, M_TEMP);						\
	(v) = NULL;							\
}

/* NOTE(review): these duplicate PFIOCX_STRUCT_ADDR32/64 above -- apparently
 * misnamed leftovers; kept as-is since callers may reference either name. */
#define PFIOC_STRUCT_ADDR32(s)		(&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s)		(&s##_un->_u._s##_64)
323
/* perimeter rwlock: serializes DIOCSTART/DIOCSTOP against the data path */
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

/* pf mutex: protects pf state, rulesets and the token list */
static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;		/* started in pfinit() */

extern void pfi_kifaddr_update(void *);

/* pf enable ref-counting helper functions */
static u_int64_t generate_token(struct proc *);
static int remove_token(struct pfioc_remove_token *);
static void invalidate_all_tokens(void);
340
341 static u_int64_t
342 generate_token(struct proc *p)
343 {
344 u_int64_t token_value;
345 struct pfioc_kernel_token *new_token;
346
347 new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
348 M_WAITOK|M_ZERO);
349
350 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
351
352 if (new_token == NULL) {
353 /* malloc failed! bail! */
354 printf("%s: unable to allocate pf token structure!", __func__);
355 return (0);
356 }
357
358 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
359
360 new_token->token.token_value = token_value;
361 new_token->token.pid = proc_pid(p);
362 proc_name(new_token->token.pid, new_token->token.proc_name,
363 sizeof (new_token->token.proc_name));
364 new_token->token.timestamp = pf_calendar_time_second();
365
366 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
367 nr_tokens++;
368
369 return (token_value);
370 }
371
372 static int
373 remove_token(struct pfioc_remove_token *tok)
374 {
375 struct pfioc_kernel_token *entry, *tmp;
376
377 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
378
379 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
380 if (tok->token_value == entry->token.token_value) {
381 SLIST_REMOVE(&token_list_head, entry,
382 pfioc_kernel_token, next);
383 _FREE(entry, M_TEMP);
384 nr_tokens--;
385 return (0); /* success */
386 }
387 }
388
389 printf("pf : remove failure\n");
390 return (ESRCH); /* failure */
391 }
392
393 static void
394 invalidate_all_tokens(void)
395 {
396 struct pfioc_kernel_token *entry, *tmp;
397
398 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
399
400 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
401 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
402 _FREE(entry, M_TEMP);
403 }
404
405 nr_tokens = 0;
406 }
407
/*
 * One-time pf subsystem initialization: locks, memory pools, sub-modules,
 * the default pass rule and its timeouts, the purge thread, and the
 * /dev/pf and /dev/pfm device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* set up the perimeter rwlock and the pf mutex */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* memory pools for rules, states, state keys, app states, pool addrs */
	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on small-memory (<=256MB) systems */
	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* compile-time checks that service classes map onto their indices */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: main control device; /dev/pfm: firewall-manager device */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
529
#if 0
/*
 * Tear down the pf subsystem: detach hooks, flush rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and
 * sub-modules.  Compiled out (unused); kept for reference.
 */
static void
pfdetach(void)
{
	struct pf_anchor	*anchor;
	struct pf_state		*state;
	struct pf_src_node	*node;
	struct pfioc_table	pt;
	u_int32_t		ticket;
	int			i;
	char			r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);

	/* clear states: mark everything purgeable, then sweep */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/*
	 * destroy the pools
	 * NOTE(review): pf_state_key_pl and pf_app_state_pl, initialized in
	 * pfinit(), are not destroyed here -- confirm whether intentional.
	 */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
603
604 static int
605 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
606 {
607 #pragma unused(flags, fmt, p)
608 if (minor(dev) >= PFDEV_MAX)
609 return (ENXIO);
610
611 if (minor(dev) == PFDEV_PFM) {
612 lck_mtx_lock(pf_lock);
613 if (pfdevcnt != 0) {
614 lck_mtx_unlock(pf_lock);
615 return (EBUSY);
616 }
617 pfdevcnt++;
618 lck_mtx_unlock(pf_lock);
619 }
620 return (0);
621 }
622
623 static int
624 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
625 {
626 #pragma unused(flags, fmt, p)
627 if (minor(dev) >= PFDEV_MAX)
628 return (ENXIO);
629
630 if (minor(dev) == PFDEV_PFM) {
631 lck_mtx_lock(pf_lock);
632 VERIFY(pfdevcnt > 0);
633 pfdevcnt--;
634 lck_mtx_unlock(pf_lock);
635 }
636 return (0);
637 }
638
639 static struct pf_pool *
640 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
641 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
642 u_int8_t check_ticket)
643 {
644 struct pf_ruleset *ruleset;
645 struct pf_rule *rule;
646 int rs_num;
647
648 ruleset = pf_find_ruleset(anchor);
649 if (ruleset == NULL)
650 return (NULL);
651 rs_num = pf_get_ruleset_number(rule_action);
652 if (rs_num >= PF_RULESET_MAX)
653 return (NULL);
654 if (active) {
655 if (check_ticket && ticket !=
656 ruleset->rules[rs_num].active.ticket)
657 return (NULL);
658 if (r_last)
659 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
660 pf_rulequeue);
661 else
662 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
663 } else {
664 if (check_ticket && ticket !=
665 ruleset->rules[rs_num].inactive.ticket)
666 return (NULL);
667 if (r_last)
668 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
669 pf_rulequeue);
670 else
671 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
672 }
673 if (!r_last) {
674 while ((rule != NULL) && (rule->nr != rule_number))
675 rule = TAILQ_NEXT(rule, entries);
676 }
677 if (rule == NULL)
678 return (NULL);
679
680 return (&rule->rpool);
681 }
682
683 static void
684 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
685 {
686 struct pf_pooladdr *mv_pool_pa;
687
688 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
689 TAILQ_REMOVE(poola, mv_pool_pa, entries);
690 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
691 }
692 }
693
694 static void
695 pf_empty_pool(struct pf_palist *poola)
696 {
697 struct pf_pooladdr *empty_pool_pa;
698
699 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
700 pfi_dynaddr_remove(&empty_pool_pa->addr);
701 pf_tbladdr_remove(&empty_pool_pa->addr);
702 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
703 TAILQ_REMOVE(poola, empty_pool_pa, entries);
704 pool_put(&pf_pooladdr_pl, empty_pool_pa);
705 }
706 }
707
/*
 * Unlink a rule from its queue (when rulequeue != NULL) and destroy it
 * once nothing references it anymore.  Destruction is deferred while
 * states or source nodes still point at the rule; in that case the rule
 * is freed later by the garbage-collection path when the last reference
 * drops (signalled by entries.tqe_prev == NULL).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached from any queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer destruction while states/src nodes reference the rule */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not removed above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
748
/*
 * Map a tag name to a numeric tag id, allocating a new id if the name
 * is not yet known.  Existing entries get their refcount bumped.
 * Returns 0 on failure (id space exhausted or allocation failure).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	/* existing name: reuse its id */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		/* list is kept sorted by id; stop at the first gap */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}
791
792 static void
793 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
794 {
795 struct pf_tagname *tag;
796
797 TAILQ_FOREACH(tag, head, entries)
798 if (tag->tag == tagid) {
799 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
800 return;
801 }
802 }
803
804 static void
805 tag_unref(struct pf_tags *head, u_int16_t tag)
806 {
807 struct pf_tagname *p, *next;
808
809 if (tag == 0)
810 return;
811
812 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
813 next = TAILQ_NEXT(p, entries);
814 if (tag == p->tag) {
815 if (--p->ref == 0) {
816 TAILQ_REMOVE(head, p, entries);
817 _FREE(p, M_TEMP);
818 }
819 break;
820 }
821 }
822 }
823
/* Public wrapper: resolve (or allocate) a tag id on the global pf_tags list. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}
829
/* Public wrapper: copy the name for `tagid' from the global pf_tags list. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
835
836 void
837 pf_tag_ref(u_int16_t tag)
838 {
839 struct pf_tagname *t;
840
841 TAILQ_FOREACH(t, &pf_tags, entries)
842 if (t->tag == tag)
843 break;
844 if (t != NULL)
845 t->ref++;
846 }
847
/* Public wrapper: drop a reference on `tag' in the global pf_tags list. */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
853
/* Route labels are not supported on this platform; always succeeds. */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}
860
/* Route labels are not supported on this platform; no-op. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
866
/* Route labels are not supported on this platform; no-op. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
872
873 static int
874 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
875 {
876 struct pf_ruleset *rs;
877 struct pf_rule *rule;
878
879 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
880 return (EINVAL);
881 rs = pf_find_or_create_ruleset(anchor);
882 if (rs == NULL)
883 return (EINVAL);
884 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
885 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
886 rs->rules[rs_num].inactive.rcount--;
887 }
888 *ticket = ++rs->rules[rs_num].inactive.ticket;
889 rs->rules[rs_num].inactive.open = 1;
890 return (0);
891 }
892
893 static int
894 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
895 {
896 struct pf_ruleset *rs;
897 struct pf_rule *rule;
898
899 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
900 return (EINVAL);
901 rs = pf_find_ruleset(anchor);
902 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
903 rs->rules[rs_num].inactive.ticket != ticket)
904 return (0);
905 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
906 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
907 rs->rules[rs_num].inactive.rcount--;
908 }
909 rs->rules[rs_num].inactive.open = 0;
910 return (0);
911 }
912
/*
 * Helpers for folding rule fields into the pfsync ruleset checksum.
 * The HTONL/HTONS variants byte-swap through `stor' first so the
 * checksum is identical across hosts of different endianness.
 */
#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
928
/*
 * Fold one rule address (src or dst) into the ruleset MD5 checksum.
 * Which fields are hashed depends on the address type; port ranges are
 * only meaningful for TCP/UDP.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* only TCP/UDP rules carry meaningful port ranges */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
965
/*
 * Fold the match-relevant fields of one rule into the ruleset MD5
 * checksum (used by pf_setup_pfsync_matching()).  `x' and `y' are
 * scratch storage for the byte-swapping update macros.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1004
/*
 * Commit an open rules transaction: atomically swap the inactive rule
 * queue into the active slot, move the old active rules to the inactive
 * side, and purge them.  Returns EINVAL for a bad rule set number,
 * EBUSY when the transaction is not open or the ticket is stale, or an
 * error from pf_setup_pfsync_matching().  Caller must hold pf_lock.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array, *r;
	struct pf_rulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* account for PFRULE_PFM rules being retired with the old queue */
	if(old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM)
				pffwrules--;
			r = TAILQ_NEXT(r, entries);
		}
	}


	/* the staged inactive queue becomes the live active queue */
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
1071
1072 static void
1073 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1074 int minordev)
1075 {
1076 bcopy(src, dst, sizeof (struct pf_rule));
1077
1078 dst->label[sizeof (dst->label) - 1] = '\0';
1079 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1080 dst->qname[sizeof (dst->qname) - 1] = '\0';
1081 dst->pqname[sizeof (dst->pqname) - 1] = '\0';
1082 dst->tagname[sizeof (dst->tagname) - 1] = '\0';
1083 dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
1084 dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';
1085
1086 dst->cuid = kauth_cred_getuid(p->p_ucred);
1087 dst->cpid = p->p_pid;
1088
1089 dst->anchor = NULL;
1090 dst->kif = NULL;
1091 dst->overload_tbl = NULL;
1092
1093 TAILQ_INIT(&dst->rpool.list);
1094 dst->rpool.cur = NULL;
1095
1096 /* initialize refcounting */
1097 dst->states = 0;
1098 dst->src_nodes = 0;
1099
1100 dst->entries.tqe_prev = NULL;
1101 dst->entries.tqe_next = NULL;
1102 if ((uint8_t)minordev == PFDEV_PFM)
1103 dst->rule_flag |= PFRULE_PFM;
1104 }
1105
1106 static void
1107 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1108 {
1109 bcopy(src, dst, sizeof (struct pf_rule));
1110
1111 dst->anchor = NULL;
1112 dst->kif = NULL;
1113 dst->overload_tbl = NULL;
1114
1115 TAILQ_INIT(&dst->rpool.list);
1116 dst->rpool.cur = NULL;
1117
1118 dst->entries.tqe_prev = NULL;
1119 dst->entries.tqe_next = NULL;
1120 }
1121
1122 static void
1123 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
1124 struct pf_state *s)
1125 {
1126 uint64_t secs = pf_time_second();
1127 bzero(sp, sizeof (struct pfsync_state));
1128
1129 /* copy from state key */
1130 sp->lan.addr = sk->lan.addr;
1131 sp->lan.xport = sk->lan.xport;
1132 sp->gwy.addr = sk->gwy.addr;
1133 sp->gwy.xport = sk->gwy.xport;
1134 sp->ext_lan.addr = sk->ext_lan.addr;
1135 sp->ext_lan.xport = sk->ext_lan.xport;
1136 sp->ext_gwy.addr = sk->ext_gwy.addr;
1137 sp->ext_gwy.xport = sk->ext_gwy.xport;
1138 sp->proto_variant = sk->proto_variant;
1139 sp->tag = s->tag;
1140 sp->proto = sk->proto;
1141 sp->af_lan = sk->af_lan;
1142 sp->af_gwy = sk->af_gwy;
1143 sp->direction = sk->direction;
1144 sp->flowhash = sk->flowhash;
1145
1146 /* copy from state */
1147 memcpy(&sp->id, &s->id, sizeof (sp->id));
1148 sp->creatorid = s->creatorid;
1149 strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
1150 pf_state_peer_to_pfsync(&s->src, &sp->src);
1151 pf_state_peer_to_pfsync(&s->dst, &sp->dst);
1152
1153 sp->rule = s->rule.ptr->nr;
1154 sp->nat_rule = (s->nat_rule.ptr == NULL) ?
1155 (unsigned)-1 : s->nat_rule.ptr->nr;
1156 sp->anchor = (s->anchor.ptr == NULL) ?
1157 (unsigned)-1 : s->anchor.ptr->nr;
1158
1159 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
1160 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
1161 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
1162 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
1163 sp->creation = secs - s->creation;
1164 sp->expire = pf_state_expires(s);
1165 sp->log = s->log;
1166 sp->allow_opts = s->allow_opts;
1167 sp->timeout = s->timeout;
1168
1169 if (s->src_node)
1170 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
1171 if (s->nat_src_node)
1172 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
1173
1174 if (sp->expire > secs)
1175 sp->expire -= secs;
1176 else
1177 sp->expire = 0;
1178
1179 }
1180
1181 static void
1182 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
1183 struct pf_state *s)
1184 {
1185 /* copy to state key */
1186 sk->lan.addr = sp->lan.addr;
1187 sk->lan.xport = sp->lan.xport;
1188 sk->gwy.addr = sp->gwy.addr;
1189 sk->gwy.xport = sp->gwy.xport;
1190 sk->ext_lan.addr = sp->ext_lan.addr;
1191 sk->ext_lan.xport = sp->ext_lan.xport;
1192 sk->ext_gwy.addr = sp->ext_gwy.addr;
1193 sk->ext_gwy.xport = sp->ext_gwy.xport;
1194 sk->proto_variant = sp->proto_variant;
1195 s->tag = sp->tag;
1196 sk->proto = sp->proto;
1197 sk->af_lan = sp->af_lan;
1198 sk->af_gwy = sp->af_gwy;
1199 sk->direction = sp->direction;
1200 sk->flowhash = pf_calc_state_key_flowhash(sk);
1201
1202 /* copy to state */
1203 memcpy(&s->id, &sp->id, sizeof (sp->id));
1204 s->creatorid = sp->creatorid;
1205 pf_state_peer_from_pfsync(&sp->src, &s->src);
1206 pf_state_peer_from_pfsync(&sp->dst, &s->dst);
1207
1208 s->rule.ptr = &pf_default_rule;
1209 s->nat_rule.ptr = NULL;
1210 s->anchor.ptr = NULL;
1211 s->rt_kif = NULL;
1212 s->creation = pf_time_second();
1213 s->expire = pf_time_second();
1214 if (sp->expire > 0)
1215 s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
1216 s->pfsync_time = 0;
1217 s->packets[0] = s->packets[1] = 0;
1218 s->bytes[0] = s->bytes[1] = 0;
1219 }
1220
1221 static void
1222 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1223 {
1224 bcopy(src, dst, sizeof (struct pf_pooladdr));
1225
1226 dst->entries.tqe_prev = NULL;
1227 dst->entries.tqe_next = NULL;
1228 dst->ifname[sizeof (dst->ifname) - 1] = '\0';
1229 dst->kif = NULL;
1230 }
1231
1232 static void
1233 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1234 {
1235 bcopy(src, dst, sizeof (struct pf_pooladdr));
1236
1237 dst->entries.tqe_prev = NULL;
1238 dst->entries.tqe_next = NULL;
1239 dst->kif = NULL;
1240 }
1241
1242 static int
1243 pf_setup_pfsync_matching(struct pf_ruleset *rs)
1244 {
1245 MD5_CTX ctx;
1246 struct pf_rule *rule;
1247 int rs_cnt;
1248 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1249
1250 MD5Init(&ctx);
1251 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1252 /* XXX PF_RULESET_SCRUB as well? */
1253 if (rs_cnt == PF_RULESET_SCRUB)
1254 continue;
1255
1256 if (rs->rules[rs_cnt].inactive.ptr_array)
1257 _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1258 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1259
1260 if (rs->rules[rs_cnt].inactive.rcount) {
1261 rs->rules[rs_cnt].inactive.ptr_array =
1262 _MALLOC(sizeof (caddr_t) *
1263 rs->rules[rs_cnt].inactive.rcount,
1264 M_TEMP, M_WAITOK);
1265
1266 if (!rs->rules[rs_cnt].inactive.ptr_array)
1267 return (ENOMEM);
1268 }
1269
1270 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1271 entries) {
1272 pf_hash_rule(&ctx, rule);
1273 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1274 }
1275 }
1276
1277 MD5Final(digest, &ctx);
1278 memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
1279 return (0);
1280 }
1281
1282 static void
1283 pf_start(void)
1284 {
1285 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1286
1287 VERIFY(pf_is_enabled == 0);
1288
1289 pf_is_enabled = 1;
1290 pf_status.running = 1;
1291 pf_status.since = pf_calendar_time_second();
1292 if (pf_status.stateid == 0) {
1293 pf_status.stateid = pf_time_second();
1294 pf_status.stateid = pf_status.stateid << 32;
1295 }
1296 wakeup(pf_purge_thread_fn);
1297 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1298 }
1299
1300 static void
1301 pf_stop(void)
1302 {
1303 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1304
1305 VERIFY(pf_is_enabled);
1306
1307 pf_status.running = 0;
1308 pf_is_enabled = 0;
1309 pf_status.since = pf_calendar_time_second();
1310 wakeup(pf_purge_thread_fn);
1311 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1312 }
1313
1314 static int
1315 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1316 {
1317 #pragma unused(dev)
1318 int p64 = proc_is64bit(p);
1319 int error = 0;
1320 int minordev = minor(dev);
1321
1322 if (kauth_cred_issuser(kauth_cred_get()) == 0)
1323 return (EPERM);
1324
1325 /* XXX keep in sync with switch() below */
1326 if (securelevel > 1)
1327 switch (cmd) {
1328 case DIOCGETRULES:
1329 case DIOCGETRULE:
1330 case DIOCGETADDRS:
1331 case DIOCGETADDR:
1332 case DIOCGETSTATE:
1333 case DIOCSETSTATUSIF:
1334 case DIOCGETSTATUS:
1335 case DIOCCLRSTATUS:
1336 case DIOCNATLOOK:
1337 case DIOCSETDEBUG:
1338 case DIOCGETSTATES:
1339 case DIOCINSERTRULE:
1340 case DIOCDELETERULE:
1341 case DIOCGETTIMEOUT:
1342 case DIOCCLRRULECTRS:
1343 case DIOCGETLIMIT:
1344 case DIOCGETALTQS:
1345 case DIOCGETALTQ:
1346 case DIOCGETQSTATS:
1347 case DIOCGETRULESETS:
1348 case DIOCGETRULESET:
1349 case DIOCRGETTABLES:
1350 case DIOCRGETTSTATS:
1351 case DIOCRCLRTSTATS:
1352 case DIOCRCLRADDRS:
1353 case DIOCRADDADDRS:
1354 case DIOCRDELADDRS:
1355 case DIOCRSETADDRS:
1356 case DIOCRGETADDRS:
1357 case DIOCRGETASTATS:
1358 case DIOCRCLRASTATS:
1359 case DIOCRTSTADDRS:
1360 case DIOCOSFPGET:
1361 case DIOCGETSRCNODES:
1362 case DIOCCLRSRCNODES:
1363 case DIOCIGETIFACES:
1364 case DIOCGIFSPEED:
1365 case DIOCSETIFFLAG:
1366 case DIOCCLRIFFLAG:
1367 break;
1368 case DIOCRCLRTABLES:
1369 case DIOCRADDTABLES:
1370 case DIOCRDELTABLES:
1371 case DIOCRSETTFLAGS: {
1372 int pfrio_flags;
1373
1374 bcopy(&((struct pfioc_table *)(void *)addr)->
1375 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1376
1377 if (pfrio_flags & PFR_FLAG_DUMMY)
1378 break; /* dummy operation ok */
1379 return (EPERM);
1380 }
1381 default:
1382 return (EPERM);
1383 }
1384
1385 if (!(flags & FWRITE))
1386 switch (cmd) {
1387 case DIOCSTART:
1388 case DIOCSTARTREF:
1389 case DIOCSTOP:
1390 case DIOCSTOPREF:
1391 case DIOCGETSTARTERS:
1392 case DIOCGETRULES:
1393 case DIOCGETADDRS:
1394 case DIOCGETADDR:
1395 case DIOCGETSTATE:
1396 case DIOCGETSTATUS:
1397 case DIOCGETSTATES:
1398 case DIOCINSERTRULE:
1399 case DIOCDELETERULE:
1400 case DIOCGETTIMEOUT:
1401 case DIOCGETLIMIT:
1402 case DIOCGETALTQS:
1403 case DIOCGETALTQ:
1404 case DIOCGETQSTATS:
1405 case DIOCGETRULESETS:
1406 case DIOCGETRULESET:
1407 case DIOCNATLOOK:
1408 case DIOCRGETTABLES:
1409 case DIOCRGETTSTATS:
1410 case DIOCRGETADDRS:
1411 case DIOCRGETASTATS:
1412 case DIOCRTSTADDRS:
1413 case DIOCOSFPGET:
1414 case DIOCGETSRCNODES:
1415 case DIOCIGETIFACES:
1416 case DIOCGIFSPEED:
1417 break;
1418 case DIOCRCLRTABLES:
1419 case DIOCRADDTABLES:
1420 case DIOCRDELTABLES:
1421 case DIOCRCLRTSTATS:
1422 case DIOCRCLRADDRS:
1423 case DIOCRADDADDRS:
1424 case DIOCRDELADDRS:
1425 case DIOCRSETADDRS:
1426 case DIOCRSETTFLAGS: {
1427 int pfrio_flags;
1428
1429 bcopy(&((struct pfioc_table *)(void *)addr)->
1430 pfrio_flags, &pfrio_flags, sizeof (pfrio_flags));
1431
1432 if (pfrio_flags & PFR_FLAG_DUMMY) {
1433 flags |= FWRITE; /* need write lock for dummy */
1434 break; /* dummy operation ok */
1435 }
1436 return (EACCES);
1437 }
1438 case DIOCGETRULE: {
1439 u_int32_t action;
1440
1441 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1442 &action, sizeof (action));
1443
1444 if (action == PF_GET_CLR_CNTR)
1445 return (EACCES);
1446 break;
1447 }
1448 default:
1449 return (EACCES);
1450 }
1451
1452 if (flags & FWRITE)
1453 lck_rw_lock_exclusive(pf_perim_lock);
1454 else
1455 lck_rw_lock_shared(pf_perim_lock);
1456
1457 lck_mtx_lock(pf_lock);
1458
1459 switch (cmd) {
1460
1461 case DIOCSTART:
1462 if (pf_status.running) {
1463 /*
1464 * Increment the reference for a simple -e enable, so
1465 * that even if other processes drop their references,
1466 * pf will still be available to processes that turned
1467 * it on without taking a reference
1468 */
1469 if (nr_tokens == pf_enabled_ref_count) {
1470 pf_enabled_ref_count++;
1471 VERIFY(pf_enabled_ref_count != 0);
1472 }
1473 error = EEXIST;
1474 } else if (pf_purge_thread == NULL) {
1475 error = ENOMEM;
1476 } else {
1477 pf_start();
1478 pf_enabled_ref_count++;
1479 VERIFY(pf_enabled_ref_count != 0);
1480 }
1481 break;
1482
1483 case DIOCSTARTREF: /* u_int64_t */
1484 if (pf_purge_thread == NULL) {
1485 error = ENOMEM;
1486 } else {
1487 u_int64_t token;
1488
1489 /* small enough to be on stack */
1490 if ((token = generate_token(p)) != 0) {
1491 if (pf_is_enabled == 0) {
1492 pf_start();
1493 }
1494 pf_enabled_ref_count++;
1495 VERIFY(pf_enabled_ref_count != 0);
1496 } else {
1497 error = ENOMEM;
1498 DPFPRINTF(PF_DEBUG_URGENT,
1499 ("pf: unable to generate token\n"));
1500 }
1501 bcopy(&token, addr, sizeof (token));
1502 }
1503 break;
1504
1505 case DIOCSTOP:
1506 if (!pf_status.running) {
1507 error = ENOENT;
1508 } else {
1509 pf_stop();
1510 pf_enabled_ref_count = 0;
1511 invalidate_all_tokens();
1512 }
1513 break;
1514
1515 case DIOCSTOPREF: /* struct pfioc_remove_token */
1516 if (!pf_status.running) {
1517 error = ENOENT;
1518 } else {
1519 struct pfioc_remove_token pfrt;
1520
1521 /* small enough to be on stack */
1522 bcopy(addr, &pfrt, sizeof (pfrt));
1523 if ((error = remove_token(&pfrt)) == 0) {
1524 VERIFY(pf_enabled_ref_count != 0);
1525 pf_enabled_ref_count--;
1526 /* return currently held references */
1527 pfrt.refcount = pf_enabled_ref_count;
1528 DPFPRINTF(PF_DEBUG_MISC,
1529 ("pf: enabled refcount decremented\n"));
1530 } else {
1531 error = EINVAL;
1532 DPFPRINTF(PF_DEBUG_URGENT,
1533 ("pf: token mismatch\n"));
1534 }
1535 bcopy(&pfrt, addr, sizeof (pfrt));
1536
1537 if (error == 0 && pf_enabled_ref_count == 0)
1538 pf_stop();
1539 }
1540 break;
1541
1542 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1543 PFIOCX_STRUCT_DECL(pfioc_tokens);
1544
1545 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;);
1546 error = pfioctl_ioc_tokens(cmd,
1547 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1548 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1549 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1550 break;
1551 }
1552
1553 case DIOCADDRULE: /* struct pfioc_rule */
1554 case DIOCGETRULES: /* struct pfioc_rule */
1555 case DIOCGETRULE: /* struct pfioc_rule */
1556 case DIOCCHANGERULE: /* struct pfioc_rule */
1557 case DIOCINSERTRULE: /* struct pfioc_rule */
1558 case DIOCDELETERULE: { /* struct pfioc_rule */
1559 struct pfioc_rule *pr = NULL;
1560
1561 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
1562 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1563 PFIOC_STRUCT_END(pr, addr);
1564 break;
1565 }
1566
1567 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1568 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1569 struct pfioc_state_kill *psk = NULL;
1570
1571 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;);
1572 error = pfioctl_ioc_state_kill(cmd, psk, p);
1573 PFIOC_STRUCT_END(psk, addr);
1574 break;
1575 }
1576
1577 case DIOCADDSTATE: /* struct pfioc_state */
1578 case DIOCGETSTATE: { /* struct pfioc_state */
1579 struct pfioc_state *ps = NULL;
1580
1581 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;);
1582 error = pfioctl_ioc_state(cmd, ps, p);
1583 PFIOC_STRUCT_END(ps, addr);
1584 break;
1585 }
1586
1587 case DIOCGETSTATES: { /* struct pfioc_states */
1588 PFIOCX_STRUCT_DECL(pfioc_states);
1589
1590 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;);
1591 error = pfioctl_ioc_states(cmd,
1592 PFIOCX_STRUCT_ADDR32(pfioc_states),
1593 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1594 PFIOCX_STRUCT_END(pfioc_states, addr);
1595 break;
1596 }
1597
1598 case DIOCGETSTATUS: { /* struct pf_status */
1599 struct pf_status *s = NULL;
1600
1601 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;);
1602 pfi_update_status(s->ifname, s);
1603 PFIOC_STRUCT_END(s, addr);
1604 break;
1605 }
1606
1607 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1608 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1609
1610 /* OK for unaligned accesses */
1611 if (pi->ifname[0] == 0) {
1612 bzero(pf_status.ifname, IFNAMSIZ);
1613 break;
1614 }
1615 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1616 break;
1617 }
1618
1619 case DIOCCLRSTATUS: {
1620 bzero(pf_status.counters, sizeof (pf_status.counters));
1621 bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
1622 bzero(pf_status.scounters, sizeof (pf_status.scounters));
1623 pf_status.since = pf_calendar_time_second();
1624 if (*pf_status.ifname)
1625 pfi_update_status(pf_status.ifname, NULL);
1626 break;
1627 }
1628
1629 case DIOCNATLOOK: { /* struct pfioc_natlook */
1630 struct pfioc_natlook *pnl = NULL;
1631
1632 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
1633 error = pfioctl_ioc_natlook(cmd, pnl, p);
1634 PFIOC_STRUCT_END(pnl, addr);
1635 break;
1636 }
1637
1638 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1639 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1640 struct pfioc_tm pt;
1641
1642 /* small enough to be on stack */
1643 bcopy(addr, &pt, sizeof (pt));
1644 error = pfioctl_ioc_tm(cmd, &pt, p);
1645 bcopy(&pt, addr, sizeof (pt));
1646 break;
1647 }
1648
1649 case DIOCGETLIMIT: /* struct pfioc_limit */
1650 case DIOCSETLIMIT: { /* struct pfioc_limit */
1651 struct pfioc_limit pl;
1652
1653 /* small enough to be on stack */
1654 bcopy(addr, &pl, sizeof (pl));
1655 error = pfioctl_ioc_limit(cmd, &pl, p);
1656 bcopy(&pl, addr, sizeof (pl));
1657 break;
1658 }
1659
1660 case DIOCSETDEBUG: { /* u_int32_t */
1661 bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
1662 break;
1663 }
1664
1665 case DIOCCLRRULECTRS: {
1666 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1667 struct pf_ruleset *ruleset = &pf_main_ruleset;
1668 struct pf_rule *rule;
1669
1670 TAILQ_FOREACH(rule,
1671 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1672 rule->evaluations = 0;
1673 rule->packets[0] = rule->packets[1] = 0;
1674 rule->bytes[0] = rule->bytes[1] = 0;
1675 }
1676 break;
1677 }
1678
1679 case DIOCGIFSPEED: {
1680 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1681 struct pf_ifspeed ps;
1682 struct ifnet *ifp;
1683 u_int64_t baudrate;
1684
1685 if (psp->ifname[0] != '\0') {
1686 /* Can we completely trust user-land? */
1687 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1688 ps.ifname[IFNAMSIZ - 1] = '\0';
1689 ifp = ifunit(ps.ifname);
1690 if (ifp != NULL) {
1691 baudrate = ifp->if_output_bw.max_bw;
1692 bcopy(&baudrate, &psp->baudrate,
1693 sizeof (baudrate));
1694 } else {
1695 error = EINVAL;
1696 }
1697 } else {
1698 error = EINVAL;
1699 }
1700 break;
1701 }
1702
1703 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
1704 case DIOCADDADDR: /* struct pfioc_pooladdr */
1705 case DIOCGETADDRS: /* struct pfioc_pooladdr */
1706 case DIOCGETADDR: /* struct pfioc_pooladdr */
1707 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
1708 struct pfioc_pooladdr *pp = NULL;
1709
1710 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
1711 error = pfioctl_ioc_pooladdr(cmd, pp, p);
1712 PFIOC_STRUCT_END(pp, addr);
1713 break;
1714 }
1715
1716 case DIOCGETRULESETS: /* struct pfioc_ruleset */
1717 case DIOCGETRULESET: { /* struct pfioc_ruleset */
1718 struct pfioc_ruleset *pr = NULL;
1719
1720 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
1721 error = pfioctl_ioc_ruleset(cmd, pr, p);
1722 PFIOC_STRUCT_END(pr, addr);
1723 break;
1724 }
1725
1726 case DIOCRCLRTABLES: /* struct pfioc_table */
1727 case DIOCRADDTABLES: /* struct pfioc_table */
1728 case DIOCRDELTABLES: /* struct pfioc_table */
1729 case DIOCRGETTABLES: /* struct pfioc_table */
1730 case DIOCRGETTSTATS: /* struct pfioc_table */
1731 case DIOCRCLRTSTATS: /* struct pfioc_table */
1732 case DIOCRSETTFLAGS: /* struct pfioc_table */
1733 case DIOCRCLRADDRS: /* struct pfioc_table */
1734 case DIOCRADDADDRS: /* struct pfioc_table */
1735 case DIOCRDELADDRS: /* struct pfioc_table */
1736 case DIOCRSETADDRS: /* struct pfioc_table */
1737 case DIOCRGETADDRS: /* struct pfioc_table */
1738 case DIOCRGETASTATS: /* struct pfioc_table */
1739 case DIOCRCLRASTATS: /* struct pfioc_table */
1740 case DIOCRTSTADDRS: /* struct pfioc_table */
1741 case DIOCRINADEFINE: { /* struct pfioc_table */
1742 PFIOCX_STRUCT_DECL(pfioc_table);
1743
1744 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
1745 error = pfioctl_ioc_table(cmd,
1746 PFIOCX_STRUCT_ADDR32(pfioc_table),
1747 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
1748 PFIOCX_STRUCT_END(pfioc_table, addr);
1749 break;
1750 }
1751
1752 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
1753 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
1754 struct pf_osfp_ioctl *io = NULL;
1755
1756 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
1757 if (cmd == DIOCOSFPADD) {
1758 error = pf_osfp_add(io);
1759 } else {
1760 VERIFY(cmd == DIOCOSFPGET);
1761 error = pf_osfp_get(io);
1762 }
1763 PFIOC_STRUCT_END(io, addr);
1764 break;
1765 }
1766
1767 case DIOCXBEGIN: /* struct pfioc_trans */
1768 case DIOCXROLLBACK: /* struct pfioc_trans */
1769 case DIOCXCOMMIT: { /* struct pfioc_trans */
1770 PFIOCX_STRUCT_DECL(pfioc_trans);
1771
1772 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
1773 error = pfioctl_ioc_trans(cmd,
1774 PFIOCX_STRUCT_ADDR32(pfioc_trans),
1775 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
1776 PFIOCX_STRUCT_END(pfioc_trans, addr);
1777 break;
1778 }
1779
1780 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
1781 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
1782
1783 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
1784 error = ENOMEM; break;);
1785 error = pfioctl_ioc_src_nodes(cmd,
1786 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
1787 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
1788 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
1789 break;
1790 }
1791
1792 case DIOCCLRSRCNODES: {
1793 struct pf_src_node *n;
1794 struct pf_state *state;
1795
1796 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1797 state->src_node = NULL;
1798 state->nat_src_node = NULL;
1799 }
1800 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
1801 n->expire = 1;
1802 n->states = 0;
1803 }
1804 pf_purge_expired_src_nodes();
1805 pf_status.src_nodes = 0;
1806 break;
1807 }
1808
1809 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
1810 struct pfioc_src_node_kill *psnk = NULL;
1811
1812 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
1813 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
1814 PFIOC_STRUCT_END(psnk, addr);
1815 break;
1816 }
1817
1818 case DIOCSETHOSTID: { /* u_int32_t */
1819 u_int32_t hid;
1820
1821 /* small enough to be on stack */
1822 bcopy(addr, &hid, sizeof (hid));
1823 if (hid == 0)
1824 pf_status.hostid = random();
1825 else
1826 pf_status.hostid = hid;
1827 break;
1828 }
1829
1830 case DIOCOSFPFLUSH:
1831 pf_osfp_flush();
1832 break;
1833
1834 case DIOCIGETIFACES: /* struct pfioc_iface */
1835 case DIOCSETIFFLAG: /* struct pfioc_iface */
1836 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
1837 PFIOCX_STRUCT_DECL(pfioc_iface);
1838
1839 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
1840 error = pfioctl_ioc_iface(cmd,
1841 PFIOCX_STRUCT_ADDR32(pfioc_iface),
1842 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
1843 PFIOCX_STRUCT_END(pfioc_iface, addr);
1844 break;
1845 }
1846
1847 default:
1848 error = ENODEV;
1849 break;
1850 }
1851
1852 lck_mtx_unlock(pf_lock);
1853 lck_rw_done(pf_perim_lock);
1854
1855 return (error);
1856 }
1857
1858 static int
1859 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1860 struct pfioc_table_64 *io64, struct proc *p)
1861 {
1862 int p64 = proc_is64bit(p);
1863 int error = 0;
1864
1865 if (!p64)
1866 goto struct32;
1867
1868 /*
1869 * 64-bit structure processing
1870 */
1871 switch (cmd) {
1872 case DIOCRCLRTABLES:
1873 if (io64->pfrio_esize != 0) {
1874 error = ENODEV;
1875 break;
1876 }
1877 pfr_table_copyin_cleanup(&io64->pfrio_table);
1878 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1879 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1880 break;
1881
1882 case DIOCRADDTABLES:
1883 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1884 error = ENODEV;
1885 break;
1886 }
1887 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1888 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1889 break;
1890
1891 case DIOCRDELTABLES:
1892 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1893 error = ENODEV;
1894 break;
1895 }
1896 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1897 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1898 break;
1899
1900 case DIOCRGETTABLES:
1901 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1902 error = ENODEV;
1903 break;
1904 }
1905 pfr_table_copyin_cleanup(&io64->pfrio_table);
1906 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
1907 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1908 break;
1909
1910 case DIOCRGETTSTATS:
1911 if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
1912 error = ENODEV;
1913 break;
1914 }
1915 pfr_table_copyin_cleanup(&io64->pfrio_table);
1916 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
1917 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1918 break;
1919
1920 case DIOCRCLRTSTATS:
1921 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1922 error = ENODEV;
1923 break;
1924 }
1925 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
1926 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1927 break;
1928
1929 case DIOCRSETTFLAGS:
1930 if (io64->pfrio_esize != sizeof (struct pfr_table)) {
1931 error = ENODEV;
1932 break;
1933 }
1934 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
1935 io64->pfrio_setflag, io64->pfrio_clrflag,
1936 &io64->pfrio_nchange, &io64->pfrio_ndel,
1937 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1938 break;
1939
1940 case DIOCRCLRADDRS:
1941 if (io64->pfrio_esize != 0) {
1942 error = ENODEV;
1943 break;
1944 }
1945 pfr_table_copyin_cleanup(&io64->pfrio_table);
1946 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
1947 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1948 break;
1949
1950 case DIOCRADDADDRS:
1951 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1952 error = ENODEV;
1953 break;
1954 }
1955 pfr_table_copyin_cleanup(&io64->pfrio_table);
1956 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1957 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
1958 PFR_FLAG_USERIOCTL);
1959 break;
1960
1961 case DIOCRDELADDRS:
1962 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1963 error = ENODEV;
1964 break;
1965 }
1966 pfr_table_copyin_cleanup(&io64->pfrio_table);
1967 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1968 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
1969 PFR_FLAG_USERIOCTL);
1970 break;
1971
1972 case DIOCRSETADDRS:
1973 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1974 error = ENODEV;
1975 break;
1976 }
1977 pfr_table_copyin_cleanup(&io64->pfrio_table);
1978 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1979 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
1980 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
1981 PFR_FLAG_USERIOCTL, 0);
1982 break;
1983
1984 case DIOCRGETADDRS:
1985 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
1986 error = ENODEV;
1987 break;
1988 }
1989 pfr_table_copyin_cleanup(&io64->pfrio_table);
1990 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
1991 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1992 break;
1993
1994 case DIOCRGETASTATS:
1995 if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
1996 error = ENODEV;
1997 break;
1998 }
1999 pfr_table_copyin_cleanup(&io64->pfrio_table);
2000 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2001 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2002 break;
2003
2004 case DIOCRCLRASTATS:
2005 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2006 error = ENODEV;
2007 break;
2008 }
2009 pfr_table_copyin_cleanup(&io64->pfrio_table);
2010 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2011 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2012 PFR_FLAG_USERIOCTL);
2013 break;
2014
2015 case DIOCRTSTADDRS:
2016 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2017 error = ENODEV;
2018 break;
2019 }
2020 pfr_table_copyin_cleanup(&io64->pfrio_table);
2021 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2022 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2023 PFR_FLAG_USERIOCTL);
2024 break;
2025
2026 case DIOCRINADEFINE:
2027 if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
2028 error = ENODEV;
2029 break;
2030 }
2031 pfr_table_copyin_cleanup(&io64->pfrio_table);
2032 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2033 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2034 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2035 break;
2036
2037 default:
2038 VERIFY(0);
2039 /* NOTREACHED */
2040 }
2041 goto done;
2042
2043 struct32:
2044 /*
2045 * 32-bit structure processing
2046 */
2047 switch (cmd) {
2048 case DIOCRCLRTABLES:
2049 if (io32->pfrio_esize != 0) {
2050 error = ENODEV;
2051 break;
2052 }
2053 pfr_table_copyin_cleanup(&io32->pfrio_table);
2054 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2055 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2056 break;
2057
2058 case DIOCRADDTABLES:
2059 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2060 error = ENODEV;
2061 break;
2062 }
2063 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2064 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2065 break;
2066
2067 case DIOCRDELTABLES:
2068 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2069 error = ENODEV;
2070 break;
2071 }
2072 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2073 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2074 break;
2075
2076 case DIOCRGETTABLES:
2077 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2078 error = ENODEV;
2079 break;
2080 }
2081 pfr_table_copyin_cleanup(&io32->pfrio_table);
2082 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2083 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2084 break;
2085
2086 case DIOCRGETTSTATS:
2087 if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
2088 error = ENODEV;
2089 break;
2090 }
2091 pfr_table_copyin_cleanup(&io32->pfrio_table);
2092 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2093 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2094 break;
2095
2096 case DIOCRCLRTSTATS:
2097 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2098 error = ENODEV;
2099 break;
2100 }
2101 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2102 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2103 break;
2104
2105 case DIOCRSETTFLAGS:
2106 if (io32->pfrio_esize != sizeof (struct pfr_table)) {
2107 error = ENODEV;
2108 break;
2109 }
2110 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2111 io32->pfrio_setflag, io32->pfrio_clrflag,
2112 &io32->pfrio_nchange, &io32->pfrio_ndel,
2113 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2114 break;
2115
2116 case DIOCRCLRADDRS:
2117 if (io32->pfrio_esize != 0) {
2118 error = ENODEV;
2119 break;
2120 }
2121 pfr_table_copyin_cleanup(&io32->pfrio_table);
2122 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2123 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2124 break;
2125
2126 case DIOCRADDADDRS:
2127 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2128 error = ENODEV;
2129 break;
2130 }
2131 pfr_table_copyin_cleanup(&io32->pfrio_table);
2132 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2133 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2134 PFR_FLAG_USERIOCTL);
2135 break;
2136
2137 case DIOCRDELADDRS:
2138 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2139 error = ENODEV;
2140 break;
2141 }
2142 pfr_table_copyin_cleanup(&io32->pfrio_table);
2143 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2144 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2145 PFR_FLAG_USERIOCTL);
2146 break;
2147
2148 case DIOCRSETADDRS:
2149 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2150 error = ENODEV;
2151 break;
2152 }
2153 pfr_table_copyin_cleanup(&io32->pfrio_table);
2154 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2155 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2156 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2157 PFR_FLAG_USERIOCTL, 0);
2158 break;
2159
2160 case DIOCRGETADDRS:
2161 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2162 error = ENODEV;
2163 break;
2164 }
2165 pfr_table_copyin_cleanup(&io32->pfrio_table);
2166 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2167 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2168 break;
2169
2170 case DIOCRGETASTATS:
2171 if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
2172 error = ENODEV;
2173 break;
2174 }
2175 pfr_table_copyin_cleanup(&io32->pfrio_table);
2176 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2177 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2178 break;
2179
2180 case DIOCRCLRASTATS:
2181 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2182 error = ENODEV;
2183 break;
2184 }
2185 pfr_table_copyin_cleanup(&io32->pfrio_table);
2186 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2187 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2188 PFR_FLAG_USERIOCTL);
2189 break;
2190
2191 case DIOCRTSTADDRS:
2192 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2193 error = ENODEV;
2194 break;
2195 }
2196 pfr_table_copyin_cleanup(&io32->pfrio_table);
2197 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2198 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2199 PFR_FLAG_USERIOCTL);
2200 break;
2201
2202 case DIOCRINADEFINE:
2203 if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
2204 error = ENODEV;
2205 break;
2206 }
2207 pfr_table_copyin_cleanup(&io32->pfrio_table);
2208 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2209 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2210 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2211 break;
2212
2213 default:
2214 VERIFY(0);
2215 /* NOTREACHED */
2216 }
2217
2218 done:
2219 return (error);
2220 }
2221
/*
 * Handle token-listing ioctls.  For DIOCGETSTARTERS, copy the kernel's
 * list of PF "starter" tokens out to a caller-supplied buffer.
 *
 * tok32/tok64 are the 32-bit and 64-bit user views of the same request;
 * exactly one is consulted, selected by the calling process's ABI (p64).
 * On a size probe (caller passes size == 0) only the required buffer
 * size is reported back.  Returns 0 or an errno value.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof (struct pfioc_token) * nr_tokens;
		/* ocnt remembers the caller's buffer size; cnt tracks what's left */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe: report how much space is needed and stop */
			if (p64)
				tok64->size = size;
			else
				tok32->size = size;
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* flatten the kernel token list into the staging buffer */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof (*tokens))
				break;	/* no more buffer space left */

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			cnt -= sizeof (struct pfioc_token);
		}

		/* ocnt - cnt == number of bytes actually staged above */
		if (cnt < ocnt)
			error = copyout(tokens, token_buf, ocnt - cnt);

		/* report back how much was copied out */
		if (p64)
			tok64->size = ocnt - cnt;
		else
			tok32->size = ocnt - cnt;

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
2295
/*
 * Expire every state and source node created by 'rule'.
 *
 * States bound to the rule are marked PFTM_PURGE and flushed through
 * pf_purge_expired_states().  Source nodes pointing at the rule are
 * detached from any state still referencing them, marked expired, and
 * (if any were found) purged via pf_purge_expired_src_nodes().
 */
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
	struct pf_state *state;
	struct pf_src_node *sn;
	int killed = 0;

	/* expire the states */
	state = TAILQ_FIRST(&state_list);
	while (state) {
		if (state->rule.ptr == rule)
			state->timeout = PFTM_PURGE;
		state = TAILQ_NEXT(state, entry_list);
	}
	pf_purge_expired_states(pf_status.states);

	/* expire the src_nodes */
	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
		if (sn->rule.ptr != rule)
			continue;
		if (sn->states != 0) {
			/* unhook the node from every state that references it */
			RB_FOREACH(state, pf_state_tree_id,
			    &tree_id) {
				if (state->src_node == sn)
					state->src_node = NULL;
				if (state->nat_src_node == sn)
					state->nat_src_node = NULL;
			}
			sn->states = 0;
		}
		sn->expire = 1;	/* expire "now" */
		killed++;
	}
	if (killed)
		pf_purge_expired_src_nodes();
}
2332
2333 static void
2334 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2335 struct pf_rule *rule)
2336 {
2337 struct pf_rule *r;
2338 int nr = 0;
2339
2340 pf_expire_states_and_src_nodes(rule);
2341
2342 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2343 if (ruleset->rules[rs_num].active.rcount-- == 0)
2344 panic("%s: rcount value broken!", __func__);
2345 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2346
2347 while (r) {
2348 r->nr = nr++;
2349 r = TAILQ_NEXT(r, entries);
2350 }
2351 }
2352
2353
2354 static void
2355 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2356 {
2357 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2358 ruleset->rules[rs].active.ticket =
2359 ++ruleset->rules[rs].inactive.ticket;
2360 }
2361
2362 /*
2363 * req_dev encodes the PF interface. Currently, possible values are
2364 * 0 or PFRULE_PFM
2365 */
/*
 * Delete the rule whose ticket matches pr->rule.ticket, searching every
 * ruleset category of the anchor named by pr.  If deleting the rule
 * empties an unowned (owner == "") anchor, the containing anchor rule in
 * the parent ruleset is deleted too, cascading upward via 'delete_rule:'.
 *
 * req_dev encodes the requesting PF device (0 or PFRULE_PFM); a rule may
 * only be deleted by the device class that installed it.
 * Returns 0, ENOENT if no rule carries the ticket, or EACCES on an
 * owner/device mismatch.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL)
		return (error);

	/* scan every ruleset category for the ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (ENOENT);
	else
		i--;	/* the for loop incremented past the matching category */

	if (strcmp(rule->owner, pr->rule.owner))
		return (EACCES);

delete_rule:
	/*
	 * If this rule is the last one in an unowned sub-anchor, delete it
	 * and then repeat for the anchor rule itself in the parent ruleset.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset		ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL)
			ruleset = &pf_main_ruleset;
		else
			ruleset = &parent_ruleset;

		/* locate the rule in the parent that owns the emptied anchor */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL)
			panic("%s: rule not found!", __func__);

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return (0);
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
			return EACCES;
		if (rule->rule_flag & PFRULE_PFM)
			pffwrules--;
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return (0);
}
2452
2453 /*
2454 * req_dev encodes the PF interface. Currently, possible values are
2455 * 0 or PFRULE_PFM
2456 */
/*
 * Delete every rule belonging to 'owner', walking the main ruleset and
 * descending into anchors (owned by 'owner' or unowned) whose rulesets
 * are non-empty.  Exhausted anchors are left via
 * pf_deleterule_anchor_step_out(), and pf_ruleset_cleanup() is run on
 * each ruleset in which at least one rule was removed.
 *
 * req_dev encodes the requesting PF device (0 or PFRULE_PFM); rules
 * installed by the other device class are skipped entirely.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish bookkeeping before descending */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM)
							pffwrules--;
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else
					rule = next;
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM)
						pffwrules--;
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/* end of this queue: clean up, then pop back to parent */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset)
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
			}
		}
	}
}
2523
/*
 * Step the (*ruleset_ptr, *rule_ptr) traversal cursor out of the current
 * anchor: move to the parent ruleset, locate the parent's rule that owns
 * the anchor just left, and position the cursor after it when the anchor
 * still holds rules (so traversal resumes past it rather than re-entering).
 * Panics if the parent has no rule referencing the anchor, which would
 * mean the anchor tree is inconsistent.
 */
static void
pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
    int rs, struct pf_rule **rule_ptr)
{
	struct pf_ruleset *ruleset = *ruleset_ptr;
	struct pf_rule *rule = *rule_ptr;

	/* step out of anchor */
	struct pf_ruleset *rs_copy = ruleset;
	ruleset = ruleset->anchor->parent?
	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;

	/* find the parent rule that points at the anchor we just left */
	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
	while (rule && (rule->anchor != rs_copy->anchor))
		rule = TAILQ_NEXT(rule, entries);
	if (rule == NULL)
		panic("%s: parent rule of anchor not found!", __func__);
	/* if the anchor is not empty, resume after its rule */
	if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
		rule = TAILQ_NEXT(rule, entries);

	*ruleset_ptr = ruleset;
	*rule_ptr = rule;
}
2547
2548 static void
2549 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2550 {
2551 VERIFY(aw);
2552 bzero(&aw->p, sizeof aw->p);
2553 }
2554
/*
 * Resolve and validate all external references of a freshly copied-in
 * rule (interface, tags, route labels, dynamic addresses, tables,
 * anchor, overload table, address pool) against 'ruleset'.
 *
 * Errors are accumulated: validation keeps going so partially acquired
 * references exist by the time of the final check, where pf_rm_rule()
 * releases them all at once.  Only the very first failure (interface
 * lookup) frees the bare rule directly with pool_put().
 * Returns 0 on success or the accumulated errno; on failure the rule
 * has been freed and must not be used by the caller.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset) {
	struct pf_pooladdr *apa;
	int error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing else acquired yet: safe to free directly */
			pool_put(&pf_rule_pl, rule);
			return (EINVAL);
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0])
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
			error = EBUSY;
	if (rule->match_tagname[0])
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0)
			error = EBUSY;
	/* route-to et al. require an explicit direction */
	if (rule->rt && !rule->direction)
		error = EINVAL;
#if PFLOG
	if (!rule->log)
		rule->logif = 0;
	if (rule->logif >= PFLOGIFS_MAX)
		error = EINVAL;
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr))
		error = EBUSY;
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
		error = EINVAL;
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
		error = EINVAL;
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
		error = EINVAL;
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
		error = EINVAL;
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
		if (pf_tbladdr_setup(ruleset, &apa->addr))
			error = EINVAL;

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL)
			error = EINVAL;
		else
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
	}

	/* take ownership of the staged address pool */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and route rules need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
		error = EINVAL;

	if (error) {
		/* releases every reference acquired above */
		pf_rm_rule(NULL, rule);
		return (error);
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return (0);
}
2635
2636 static int
2637 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2638 {
2639 int error = 0;
2640 u_int32_t req_dev = 0;
2641
2642 switch (cmd) {
2643 case DIOCADDRULE: {
2644 struct pf_ruleset *ruleset;
2645 struct pf_rule *rule, *tail;
2646 int rs_num;
2647
2648 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2649 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2650 ruleset = pf_find_ruleset(pr->anchor);
2651 if (ruleset == NULL) {
2652 error = EINVAL;
2653 break;
2654 }
2655 rs_num = pf_get_ruleset_number(pr->rule.action);
2656 if (rs_num >= PF_RULESET_MAX) {
2657 error = EINVAL;
2658 break;
2659 }
2660 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2661 error = EINVAL;
2662 break;
2663 }
2664 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2665 error = EBUSY;
2666 break;
2667 }
2668 if (pr->pool_ticket != ticket_pabuf) {
2669 error = EBUSY;
2670 break;
2671 }
2672 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2673 if (rule == NULL) {
2674 error = ENOMEM;
2675 break;
2676 }
2677 pf_rule_copyin(&pr->rule, rule, p, minordev);
2678 #if !INET
2679 if (rule->af == AF_INET) {
2680 pool_put(&pf_rule_pl, rule);
2681 error = EAFNOSUPPORT;
2682 break;
2683 }
2684 #endif /* INET */
2685 #if !INET6
2686 if (rule->af == AF_INET6) {
2687 pool_put(&pf_rule_pl, rule);
2688 error = EAFNOSUPPORT;
2689 break;
2690 }
2691 #endif /* INET6 */
2692 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2693 pf_rulequeue);
2694 if (tail)
2695 rule->nr = tail->nr + 1;
2696 else
2697 rule->nr = 0;
2698
2699 if ((error = pf_rule_setup(pr, rule, ruleset)))
2700 break;
2701
2702 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2703 rule, entries);
2704 ruleset->rules[rs_num].inactive.rcount++;
2705 if (rule->rule_flag & PFRULE_PFM)
2706 pffwrules++;
2707
2708 if (rule->action == PF_NAT64)
2709 atomic_add_16(&pf_nat64_configured, 1);
2710
2711 if (pr->anchor_call[0] == '\0') {
2712 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2713 if (rule->rule_flag & PFRULE_PFM) {
2714 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2715 }
2716 }
2717
2718 #if DUMMYNET
2719 if (rule->action == PF_DUMMYNET) {
2720 struct dummynet_event dn_event;
2721 uint32_t direction = DN_INOUT;;
2722 bzero(&dn_event, sizeof(dn_event));
2723
2724 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2725
2726 if (rule->direction == PF_IN)
2727 direction = DN_IN;
2728 else if (rule->direction == PF_OUT)
2729 direction = DN_OUT;
2730
2731 dn_event.dn_event_rule_config.dir = direction;
2732 dn_event.dn_event_rule_config.af = rule->af;
2733 dn_event.dn_event_rule_config.proto = rule->proto;
2734 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2735 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2736 strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2737 sizeof(dn_event.dn_event_rule_config.ifname));
2738
2739 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2740 }
2741 #endif
2742 break;
2743 }
2744
2745 case DIOCGETRULES: {
2746 struct pf_ruleset *ruleset;
2747 struct pf_rule *tail;
2748 int rs_num;
2749
2750 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2751 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2752 ruleset = pf_find_ruleset(pr->anchor);
2753 if (ruleset == NULL) {
2754 error = EINVAL;
2755 break;
2756 }
2757 rs_num = pf_get_ruleset_number(pr->rule.action);
2758 if (rs_num >= PF_RULESET_MAX) {
2759 error = EINVAL;
2760 break;
2761 }
2762 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2763 pf_rulequeue);
2764 if (tail)
2765 pr->nr = tail->nr + 1;
2766 else
2767 pr->nr = 0;
2768 pr->ticket = ruleset->rules[rs_num].active.ticket;
2769 break;
2770 }
2771
2772 case DIOCGETRULE: {
2773 struct pf_ruleset *ruleset;
2774 struct pf_rule *rule;
2775 int rs_num, i;
2776
2777 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
2778 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
2779 ruleset = pf_find_ruleset(pr->anchor);
2780 if (ruleset == NULL) {
2781 error = EINVAL;
2782 break;
2783 }
2784 rs_num = pf_get_ruleset_number(pr->rule.action);
2785 if (rs_num >= PF_RULESET_MAX) {
2786 error = EINVAL;
2787 break;
2788 }
2789 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2790 error = EBUSY;
2791 break;
2792 }
2793 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2794 while ((rule != NULL) && (rule->nr != pr->nr))
2795 rule = TAILQ_NEXT(rule, entries);
2796 if (rule == NULL) {
2797 error = EBUSY;
2798 break;
2799 }
2800 pf_rule_copyout(rule, &pr->rule);
2801 if (pf_anchor_copyout(ruleset, rule, pr)) {
2802 error = EBUSY;
2803 break;
2804 }
2805 pfi_dynaddr_copyout(&pr->rule.src.addr);
2806 pfi_dynaddr_copyout(&pr->rule.dst.addr);
2807 pf_tbladdr_copyout(&pr->rule.src.addr);
2808 pf_tbladdr_copyout(&pr->rule.dst.addr);
2809 pf_rtlabel_copyout(&pr->rule.src.addr);
2810 pf_rtlabel_copyout(&pr->rule.dst.addr);
2811 for (i = 0; i < PF_SKIP_COUNT; ++i)
2812 if (rule->skip[i].ptr == NULL)
2813 pr->rule.skip[i].nr = -1;
2814 else
2815 pr->rule.skip[i].nr =
2816 rule->skip[i].ptr->nr;
2817
2818 if (pr->action == PF_GET_CLR_CNTR) {
2819 rule->evaluations = 0;
2820 rule->packets[0] = rule->packets[1] = 0;
2821 rule->bytes[0] = rule->bytes[1] = 0;
2822 }
2823 break;
2824 }
2825
2826 case DIOCCHANGERULE: {
2827 struct pfioc_rule *pcr = pr;
2828 struct pf_ruleset *ruleset;
2829 struct pf_rule *oldrule = NULL, *newrule = NULL;
2830 struct pf_pooladdr *pa;
2831 u_int32_t nr = 0;
2832 int rs_num;
2833
2834 if (!(pcr->action == PF_CHANGE_REMOVE ||
2835 pcr->action == PF_CHANGE_GET_TICKET) &&
2836 pcr->pool_ticket != ticket_pabuf) {
2837 error = EBUSY;
2838 break;
2839 }
2840
2841 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2842 pcr->action > PF_CHANGE_GET_TICKET) {
2843 error = EINVAL;
2844 break;
2845 }
2846 pcr->anchor[sizeof (pcr->anchor) - 1] = '\0';
2847 pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0';
2848 ruleset = pf_find_ruleset(pcr->anchor);
2849 if (ruleset == NULL) {
2850 error = EINVAL;
2851 break;
2852 }
2853 rs_num = pf_get_ruleset_number(pcr->rule.action);
2854 if (rs_num >= PF_RULESET_MAX) {
2855 error = EINVAL;
2856 break;
2857 }
2858
2859 if (pcr->action == PF_CHANGE_GET_TICKET) {
2860 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2861 break;
2862 } else {
2863 if (pcr->ticket !=
2864 ruleset->rules[rs_num].active.ticket) {
2865 error = EINVAL;
2866 break;
2867 }
2868 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2869 error = EINVAL;
2870 break;
2871 }
2872 }
2873
2874 if (pcr->action != PF_CHANGE_REMOVE) {
2875 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
2876 if (newrule == NULL) {
2877 error = ENOMEM;
2878 break;
2879 }
2880 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
2881 #if !INET
2882 if (newrule->af == AF_INET) {
2883 pool_put(&pf_rule_pl, newrule);
2884 error = EAFNOSUPPORT;
2885 break;
2886 }
2887 #endif /* INET */
2888 #if !INET6
2889 if (newrule->af == AF_INET6) {
2890 pool_put(&pf_rule_pl, newrule);
2891 error = EAFNOSUPPORT;
2892 break;
2893 }
2894 #endif /* INET6 */
2895 if (newrule->ifname[0]) {
2896 newrule->kif = pfi_kif_get(newrule->ifname);
2897 if (newrule->kif == NULL) {
2898 pool_put(&pf_rule_pl, newrule);
2899 error = EINVAL;
2900 break;
2901 }
2902 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
2903 } else
2904 newrule->kif = NULL;
2905
2906 if (newrule->tagname[0])
2907 if ((newrule->tag =
2908 pf_tagname2tag(newrule->tagname)) == 0)
2909 error = EBUSY;
2910 if (newrule->match_tagname[0])
2911 if ((newrule->match_tag = pf_tagname2tag(
2912 newrule->match_tagname)) == 0)
2913 error = EBUSY;
2914 if (newrule->rt && !newrule->direction)
2915 error = EINVAL;
2916 #if PFLOG
2917 if (!newrule->log)
2918 newrule->logif = 0;
2919 if (newrule->logif >= PFLOGIFS_MAX)
2920 error = EINVAL;
2921 #endif /* PFLOG */
2922 pf_addrwrap_setup(&newrule->src.addr);
2923 pf_addrwrap_setup(&newrule->dst.addr);
2924 if (pf_rtlabel_add(&newrule->src.addr) ||
2925 pf_rtlabel_add(&newrule->dst.addr))
2926 error = EBUSY;
2927 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
2928 error = EINVAL;
2929 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
2930 error = EINVAL;
2931 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
2932 error = EINVAL;
2933 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
2934 error = EINVAL;
2935 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
2936 error = EINVAL;
2937 TAILQ_FOREACH(pa, &pf_pabuf, entries)
2938 if (pf_tbladdr_setup(ruleset, &pa->addr))
2939 error = EINVAL;
2940
2941 if (newrule->overload_tblname[0]) {
2942 if ((newrule->overload_tbl = pfr_attach_table(
2943 ruleset, newrule->overload_tblname)) ==
2944 NULL)
2945 error = EINVAL;
2946 else
2947 newrule->overload_tbl->pfrkt_flags |=
2948 PFR_TFLAG_ACTIVE;
2949 }
2950
2951 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
2952 if (((((newrule->action == PF_NAT) ||
2953 (newrule->action == PF_RDR) ||
2954 (newrule->action == PF_BINAT) ||
2955 (newrule->rt > PF_FASTROUTE)) &&
2956 !newrule->anchor)) &&
2957 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
2958 error = EINVAL;
2959
2960 if (error) {
2961 pf_rm_rule(NULL, newrule);
2962 break;
2963 }
2964 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
2965 newrule->evaluations = 0;
2966 newrule->packets[0] = newrule->packets[1] = 0;
2967 newrule->bytes[0] = newrule->bytes[1] = 0;
2968 }
2969 pf_empty_pool(&pf_pabuf);
2970
2971 if (pcr->action == PF_CHANGE_ADD_HEAD)
2972 oldrule = TAILQ_FIRST(
2973 ruleset->rules[rs_num].active.ptr);
2974 else if (pcr->action == PF_CHANGE_ADD_TAIL)
2975 oldrule = TAILQ_LAST(
2976 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
2977 else {
2978 oldrule = TAILQ_FIRST(
2979 ruleset->rules[rs_num].active.ptr);
2980 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
2981 oldrule = TAILQ_NEXT(oldrule, entries);
2982 if (oldrule == NULL) {
2983 if (newrule != NULL)
2984 pf_rm_rule(NULL, newrule);
2985 error = EINVAL;
2986 break;
2987 }
2988 }
2989
2990 if (pcr->action == PF_CHANGE_REMOVE) {
2991 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
2992 ruleset->rules[rs_num].active.rcount--;
2993 } else {
2994 if (oldrule == NULL)
2995 TAILQ_INSERT_TAIL(
2996 ruleset->rules[rs_num].active.ptr,
2997 newrule, entries);
2998 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
2999 pcr->action == PF_CHANGE_ADD_BEFORE)
3000 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3001 else
3002 TAILQ_INSERT_AFTER(
3003 ruleset->rules[rs_num].active.ptr,
3004 oldrule, newrule, entries);
3005 ruleset->rules[rs_num].active.rcount++;
3006 }
3007
3008 nr = 0;
3009 TAILQ_FOREACH(oldrule,
3010 ruleset->rules[rs_num].active.ptr, entries)
3011 oldrule->nr = nr++;
3012
3013 ruleset->rules[rs_num].active.ticket++;
3014
3015 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3016 pf_remove_if_empty_ruleset(ruleset);
3017
3018 break;
3019 }
3020
3021 case DIOCINSERTRULE: {
3022 struct pf_ruleset *ruleset;
3023 struct pf_rule *rule, *tail, *r;
3024 int rs_num;
3025 int is_anchor;
3026
3027 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3028 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3029 is_anchor = (pr->anchor_call[0] != '\0');
3030
3031 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3032 pr->rule.owner, is_anchor, &error)) == NULL)
3033 break;
3034
3035 rs_num = pf_get_ruleset_number(pr->rule.action);
3036 if (rs_num >= PF_RULESET_MAX) {
3037 error = EINVAL;
3038 break;
3039 }
3040 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3041 error = EINVAL;
3042 break;
3043 }
3044
3045 /* make sure this anchor rule doesn't exist already */
3046 if (is_anchor) {
3047 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3048 while (r) {
3049 if (r->anchor &&
3050 ((strcmp(r->anchor->name,
3051 pr->anchor_call)) == 0)) {
3052 if (((strcmp(pr->rule.owner,
3053 r->owner)) == 0) ||
3054 ((strcmp(r->owner, "")) == 0))
3055 error = EEXIST;
3056 else
3057 error = EPERM;
3058 break;
3059 }
3060 r = TAILQ_NEXT(r, entries);
3061 }
3062 if (error != 0)
3063 return (error);
3064 }
3065
3066 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3067 if (rule == NULL) {
3068 error = ENOMEM;
3069 break;
3070 }
3071 pf_rule_copyin(&pr->rule, rule, p, minordev);
3072 #if !INET
3073 if (rule->af == AF_INET) {
3074 pool_put(&pf_rule_pl, rule);
3075 error = EAFNOSUPPORT;
3076 break;
3077 }
3078 #endif /* INET */
3079 #if !INET6
3080 if (rule->af == AF_INET6) {
3081 pool_put(&pf_rule_pl, rule);
3082 error = EAFNOSUPPORT;
3083 break;
3084 }
3085
3086 #endif /* INET6 */
3087 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3088 while ((r != NULL) && (rule->priority >= (unsigned)r->priority))
3089 r = TAILQ_NEXT(r, entries);
3090 if (r == NULL) {
3091 if ((tail =
3092 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3093 pf_rulequeue)) != NULL)
3094 rule->nr = tail->nr + 1;
3095 else
3096 rule->nr = 0;
3097 } else {
3098 rule->nr = r->nr;
3099 }
3100
3101 if ((error = pf_rule_setup(pr, rule, ruleset)))
3102 break;
3103
3104 if (rule->anchor != NULL)
3105 strlcpy(rule->anchor->owner, rule->owner,
3106 PF_OWNER_NAME_SIZE);
3107
3108 if (r) {
3109 TAILQ_INSERT_BEFORE(r, rule, entries);
3110 while (r && ++r->nr)
3111 r = TAILQ_NEXT(r, entries);
3112 } else
3113 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3114 rule, entries);
3115 ruleset->rules[rs_num].active.rcount++;
3116
3117 /* Calculate checksum for the main ruleset */
3118 if (ruleset == &pf_main_ruleset)
3119 error = pf_setup_pfsync_matching(ruleset);
3120
3121 pf_ruleset_cleanup(ruleset, rs_num);
3122 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3123
3124 pr->rule.ticket = rule->ticket;
3125 pf_rule_copyout(rule, &pr->rule);
3126 if (rule->rule_flag & PFRULE_PFM)
3127 pffwrules++;
3128 if (rule->action == PF_NAT64)
3129 atomic_add_16(&pf_nat64_configured, 1);
3130
3131 if (pr->anchor_call[0] == '\0') {
3132 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3133 if (rule->rule_flag & PFRULE_PFM) {
3134 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3135 }
3136 }
3137 break;
3138 }
3139
3140 case DIOCDELETERULE: {
3141 pr->anchor[sizeof (pr->anchor) - 1] = '\0';
3142 pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
3143
3144 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3145 error = EINVAL;
3146 break;
3147 }
3148
3149 /* get device through which request is made */
3150 if ((uint8_t)minordev == PFDEV_PFM)
3151 req_dev |= PFRULE_PFM;
3152
3153 if (pr->rule.ticket) {
3154 if ((error = pf_delete_rule_by_ticket(pr, req_dev)))
3155 break;
3156 } else
3157 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3158 pr->nr = pffwrules;
3159 if (pr->rule.action == PF_NAT64)
3160 atomic_add_16(&pf_nat64_configured, -1);
3161 break;
3162 }
3163
3164 default:
3165 VERIFY(0);
3166 /* NOTREACHED */
3167 }
3168
3169 return (error);
3170 }
3171
/*
 * State-killing ioctls.
 *
 * DIOCCLRSTATES: unlink every state, optionally restricted to those
 * matching psk_ifname and/or psk_ownername (both empty => all states).
 * DIOCKILLSTATES: unlink states matching the full psk tuple (af, proto,
 * negatable src/dst address masks, ports) plus the same ifname/owner
 * filters.  In both cases psk_af is reused to return the kill count.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		/* safe iteration: fetch the successor before unlinking */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the returned kill count */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/* orient src/dst to match the caller's perspective */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* zero af/proto fields act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the returned kill count */
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3298
/*
 * Single-state ioctls.
 *
 * DIOCADDSTATE: import a pfsync-format state from userland, allocate
 * state and state-key objects, resolve the interface, and insert the
 * state into the kernel tables.
 * DIOCGETSTATE: look up a state by (id, creatorid) and export it in
 * pfsync format.  Returns 0 or an errno value.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only 's' is returned to its pool here;
			 * presumably pf_insert_state() disposes of the state
			 * key on failure — verify against its definition.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* key the lookup on the caller-supplied (id, creatorid) */
		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3372
/*
 * DIOCGETSTATES: bulk-export all states to a userland buffer.
 * Takes both the 32-bit and 64-bit layouts of pfioc_states; exactly one
 * is used depending on the calling process's ABI.  A zero ps_len is a
 * size probe: only the required length is written back.  On success
 * ps_len is updated to the number of bytes actually copied out.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {	/* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* Size probe: report worst-case buffer size. */
			size = sizeof (struct pfsync_state) * pf_status.states;
			if (p64)
				ps64->ps_len = size;
			else
				ps32->ps_len = size;
			break;
		}

		/* Single bounce buffer reused for every exported state. */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet purged. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop when the user buffer is full. */
				if ((nr + 1) * sizeof (*pstore) > (unsigned)len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof (*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof (*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Report how many bytes were actually written. */
		size = sizeof (struct pfsync_state) * nr;
		if (p64)
			ps64->ps_len = size;
		else
			ps32->ps_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
3441
/*
 * DIOCNATLOOK: given the (proto, src, dst, ports) of a connection as
 * seen by userland, find the matching NAT state and report the
 * translated addresses/ports back in pnl->rs*/rd*.  Fails with EINVAL
 * on an under-specified query, E2BIG if more than one state matches,
 * and ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * A usable query needs a protocol, non-zero addresses and,
		 * for TCP/UDP, both ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port)))
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* Inbound: search on the gateway-side key. */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof (key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof (key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* Outbound: search on the LAN-side key. */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof (key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof (key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					/* Translated source, dest unchanged. */
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof (pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof (pnl->rdxport));
				} else {
					/* Translated dest, source unchanged. */
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof (pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof (pnl->rsxport));
				}
			} else
				error = ENOENT;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3529
3530 static int
3531 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3532 {
3533 #pragma unused(p)
3534 int error = 0;
3535
3536 switch (cmd) {
3537 case DIOCSETTIMEOUT: {
3538 int old;
3539
3540 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3541 pt->seconds < 0) {
3542 error = EINVAL;
3543 goto fail;
3544 }
3545 old = pf_default_rule.timeout[pt->timeout];
3546 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3547 pt->seconds = 1;
3548 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3549 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3550 wakeup(pf_purge_thread_fn);
3551 pt->seconds = old;
3552 break;
3553 }
3554
3555 case DIOCGETTIMEOUT: {
3556 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3557 error = EINVAL;
3558 goto fail;
3559 }
3560 pt->seconds = pf_default_rule.timeout[pt->timeout];
3561 break;
3562 }
3563
3564 default:
3565 VERIFY(0);
3566 /* NOTREACHED */
3567 }
3568 fail:
3569 return (error);
3570 }
3571
3572 static int
3573 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3574 {
3575 #pragma unused(p)
3576 int error = 0;
3577
3578 switch (cmd) {
3579 case DIOCGETLIMIT: {
3580
3581 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3582 error = EINVAL;
3583 goto fail;
3584 }
3585 pl->limit = pf_pool_limits[pl->index].limit;
3586 break;
3587 }
3588
3589 case DIOCSETLIMIT: {
3590 int old_limit;
3591
3592 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3593 pf_pool_limits[pl->index].pp == NULL) {
3594 error = EINVAL;
3595 goto fail;
3596 }
3597 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3598 pl->limit, NULL, 0);
3599 old_limit = pf_pool_limits[pl->index].limit;
3600 pf_pool_limits[pl->index].limit = pl->limit;
3601 pl->limit = old_limit;
3602 break;
3603 }
3604
3605 default:
3606 VERIFY(0);
3607 /* NOTREACHED */
3608 }
3609 fail:
3610 return (error);
3611 }
3612
/*
 * Pool-address ioctls: manage the list of addresses attached to a
 * rule's address pool.  DIOCBEGINADDRS starts a new staging buffer,
 * DIOCADDADDR appends to it, DIOCGETADDRS/DIOCGETADDR enumerate an
 * active pool, and DIOCCHANGEADDR edits a live pool in place.
 * Returns 0 or an errno.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* Discard any previously staged addresses, new ticket. */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		/* Defensively NUL-terminate the user-supplied anchor. */
		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* Bind the address to its interface kif. */
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* Undo everything acquired above on failure. */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* Count the addresses in the selected pool. */
		pp->nr = 0;
		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* Walk to the pp->nr'th entry. */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof (pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* Build the replacement entry before touching
			 * the live list. */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* Locate the anchor entry for the requested position. */
		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		/* Restart round-robin/random selection from the head. */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3838
/*
 * Anchor/ruleset enumeration: DIOCGETRULESETS counts the child anchors
 * directly under pr->path, DIOCGETRULESET returns the name of the
 * pr->nr'th child.  The main ruleset (anchor == NULL) is special-cased
 * by walking the global anchor tree for parentless entries.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		/* Defensively NUL-terminate user-supplied strings. */
		pr->path[sizeof (pr->path) - 1] = '\0';
		pr->name[sizeof (pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof (pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		}
		/* An empty name means pr->nr was out of range. */
		if (!pr->name[0])
			error = EBUSY;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
3910
/*
 * Transaction ioctls (DIOCXBEGIN / DIOCXROLLBACK / DIOCXCOMMIT):
 * apply an array of pfioc_trans_e elements copied in from userland,
 * one per ruleset/table, so that a whole configuration can be swapped
 * atomically.  Commit is two-phase: all tickets are validated first,
 * then the actual commits run, which are expected not to fail.
 * Handles both 32- and 64-bit userland layouts.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* Reject mismatched userland element size. */
		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ not supported; silently accepted. */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* Write the issued ticket back to userland. */
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;
		int i;

		if (esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* Ticket must match the open table txn. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				/* Ticket must match the inactive ruleset. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Rewind to re-read the array for the second phase. */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);
}
4128
/*
 * DIOCGETSRCNODES: export all source-tracking nodes to userland.
 * A zero psn_len is a size probe.  Each exported node has its kernel
 * pointers scrubbed and its creation/expire/connection-rate fields
 * converted to values relative to the current time.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* Size probe: count nodes and report the bytes
			 * needed. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;

			size = sizeof (struct pf_src_node) * nr;
			if (p64)
				psn64->psn_len = size;
			else
				psn32->psn_len = size;
			break;
		}

		/* Single bounce buffer reused for every node. */
		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* Stop when the user buffer is full. */
			if ((nr + 1) * sizeof (*pstore) > (unsigned)space)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			/* Convert absolute timestamps to ages/remaining. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			/* Scrub kernel pointers before copyout. */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}

		/* Report the number of bytes actually written. */
		size = sizeof (struct pf_src_node) * nr;
		if (p64)
			psn64->psn_len = size;
		else
			psn32->psn_len = size;

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return (error);

}
4218
/*
 * DIOCKILLSRCNODES: expire every source-tracking node whose src/dst
 * addresses match the (possibly negated, masked) filter in psnk.
 * Any states still linked to a killed node are detached first so the
 * purge cannot leave dangling pointers.  The number of killed nodes is
 * returned to userland via psnk->psnk_af (field reuse, as with the
 * state-kill ioctls).
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				/* expire = 1 marks the node for purge. */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
4271
/*
 * Interface ioctls: DIOCIGETIFACES enumerates pf's view of the system
 * interfaces into a userland buffer, DIOCSETIFFLAG/DIOCCLRIFFLAG set
 * or clear per-interface pf flags.  Both 32- and 64-bit userland
 * layouts are accepted; the matching struct is selected by the caller's
 * ABI.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof (struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		/* Defensively NUL-terminate the user-supplied name. */
		if (p64)
			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
		else
			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';
		error = pfi_get_ifaces(
		    p64 ? io64->pfiio_name : io32->pfiio_name, buf,
		    p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		if (p64)
			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
		else
			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';

		error = pfi_set_flags(
		    p64 ? io64->pfiio_name : io32->pfiio_name,
		    p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		if (p64)
			io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0';
		else
			io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0';

		error = pfi_clear_flags(
		    p64 ? io64->pfiio_name : io32->pfiio_name,
		    p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (error);
}
4333
/*
 * Main PF entry point for the IP stacks.  Runs the packet chain rooted
 * at *mp through pf for the given address family and direction, under
 * the pf perimeter (shared) and pf mutex.  The chain is temporarily
 * detached (m_nextpkt cleared) around the per-packet test and then
 * re-linked; mppn, when non-NULL, is the caller's link to the previous
 * packet and is fixed up if *mp is consumed.  Returns 0 or an errno
 * from the per-AF hook.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp))
		return (0);

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * Only take the locks if this thread did not already hold the
	 * PF mark (marks == none means re-entry; skip locking).
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled)
			goto done;
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	/* Detach the rest of the chain while pf inspects this packet. */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL)
			m = m->m_nextpkt;
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL)
			*mppn = *mp;
		else
			*mppn = nextpkt;
	}

	if (marks != net_thread_marks_none)
		lck_mtx_unlock(pf_lock);

done:
	if (marks != net_thread_marks_none)
		lck_rw_done(pf_perim_lock);

	net_thread_marks_pop(marks);
	return (error);
}
4418
4419
4420 #if INET
/*
 * IPv4 glue around pf_test_mbuf().  Finalizes any delayed transport
 * checksum for locally originated outbound packets, swaps ip_len/ip_off
 * to network byte order for pf (and back afterwards on little-endian
 * hosts), and maps a PF drop to EHOSTUNREACH (packet freed here) or
 * ENOBUFS (packet already consumed by pf).
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* pf expects ip_len/ip_off in network byte order. */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/*
		 * Restore host byte order; re-fetch the header since pf
		 * may have replaced the mbuf.
		 */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
4473 #endif /* INET */
4474
4475 #if INET6
/*
 * IPv6 glue around pf_test6_mbuf().  Finalizes any delayed transport
 * checksum for locally originated outbound packets, then maps a PF
 * drop to EHOSTUNREACH (packet freed here) or ENOBUFS (packet already
 * consumed by pf).  No byte-order fixups are needed for IPv6.
 */
int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			error = ENOBUFS;
		}
	}
	return (error);
}
4515 #endif /* INET6 */
4516
4517 int
4518 pf_ifaddr_hook(struct ifnet *ifp)
4519 {
4520 struct pfi_kif *kif = ifp->if_pf_kif;
4521
4522 if (kif != NULL) {
4523 lck_rw_lock_shared(pf_perim_lock);
4524 lck_mtx_lock(pf_lock);
4525
4526 pfi_kifaddr_update(kif);
4527
4528 lck_mtx_unlock(pf_lock);
4529 lck_rw_done(pf_perim_lock);
4530 }
4531 return (0);
4532 }
4533
4534 /*
4535 * Caller acquires dlil lock as writer (exclusive)
4536 */
4537 void
4538 pf_ifnet_hook(struct ifnet *ifp, int attach)
4539 {
4540 lck_rw_lock_shared(pf_perim_lock);
4541 lck_mtx_lock(pf_lock);
4542 if (attach)
4543 pfi_attach_ifnet(ifp);
4544 else
4545 pfi_detach_ifnet(ifp);
4546 lck_mtx_unlock(pf_lock);
4547 lck_rw_done(pf_perim_lock);
4548 }
4549
4550 static void
4551 pf_attach_hooks(void)
4552 {
4553 ifnet_head_lock_shared();
4554 /*
4555 * Check against ifnet_addrs[] before proceeding, in case this
4556 * is called very early on, e.g. during dlil_init() before any
4557 * network interface is attached.
4558 */
4559 if (ifnet_addrs != NULL) {
4560 int i;
4561
4562 for (i = 0; i <= if_index; i++) {
4563 struct ifnet *ifp = ifindex2ifnet[i];
4564 if (ifp != NULL) {
4565 pfi_attach_ifnet(ifp);
4566 }
4567 }
4568 }
4569 ifnet_head_done();
4570 }
4571
4572 #if 0
4573 /* currently unused along with pfdetach() */
4574 static void
4575 pf_detach_hooks(void)
4576 {
4577 ifnet_head_lock_shared();
4578 if (ifnet_addrs != NULL) {
4579 for (i = 0; i <= if_index; i++) {
4580 int i;
4581
4582 struct ifnet *ifp = ifindex2ifnet[i];
4583 if (ifp != NULL && ifp->if_pf_kif != NULL) {
4584 pfi_detach_ifnet(ifp);
4585 }
4586 }
4587 }
4588 ifnet_head_done();
4589 }
4590 #endif
4591
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime; it serves as a
 * compile-time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) made available by the
 * networking stack are unique.  This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failure at compile time indicates duplicated ioctl values.
 */
/*
 * Compile-time uniqueness check for the pf 'D' group ioctls: duplicate
 * DIOC* values would produce duplicate case labels below and fail the
 * build.  Never called; kept only so the compiler sees the switch.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}