/* Source: apple/xnu (xnu-6153.61.1) — bsd/net/pf_ioctl.c */
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84
85 #include <mach/vm_param.h>
86
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_var.h>
95 #include <netinet/in_systm.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/ip_icmp.h>
99 #include <netinet/if_ether.h>
100
101 #if DUMMYNET
102 #include <netinet/ip_dummynet.h>
103 #else
104 struct ip_fw_args;
105 #endif /* DUMMYNET */
106
107 #include <libkern/crypto/md5.h>
108
109 #include <machine/machine_routines.h>
110
111 #include <miscfs/devfs/devfs.h>
112
113 #include <net/pfvar.h>
114
115 #if NPFSYNC
116 #include <net/if_pfsync.h>
117 #endif /* NPFSYNC */
118
119 #if PFLOG
120 #include <net/if_pflog.h>
121 #endif /* PFLOG */
122
123 #if INET6
124 #include <netinet/ip6.h>
125 #include <netinet/in_pcb.h>
126 #endif /* INET6 */
127
128 #include <dev/random/randomdev.h>
129
130 #if 0
131 static void pfdetach(void);
132 #endif
133 static int pfopen(dev_t, int, int, struct proc *);
134 static int pfclose(dev_t, int, int, struct proc *);
135 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
136 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
137 struct pfioc_table_64 *, struct proc *);
138 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
139 struct pfioc_tokens_64 *, struct proc *);
140 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
141 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
142 struct proc *);
143 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
144 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
145 struct pfioc_states_64 *, struct proc *);
146 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
147 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
148 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
149 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
150 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
151 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
152 struct pfioc_trans_64 *, struct proc *);
153 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
154 struct pfioc_src_nodes_64 *, struct proc *);
155 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
156 struct proc *);
157 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
158 struct pfioc_iface_64 *, struct proc *);
159 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
160 u_int8_t, u_int8_t, u_int8_t);
161 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
162 static void pf_empty_pool(struct pf_palist *);
163 static int pf_begin_rules(u_int32_t *, int, const char *);
164 static int pf_rollback_rules(u_int32_t, int, char *);
165 static int pf_setup_pfsync_matching(struct pf_ruleset *);
166 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
167 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
168 static int pf_commit_rules(u_int32_t, int, char *);
169 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
170 int);
171 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
172 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
173 struct pf_state *);
174 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
175 struct pf_state *);
176 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
177 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_expire_states_and_src_nodes(struct pf_rule *);
179 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
180 int, struct pf_rule *);
181 static void pf_addrwrap_setup(struct pf_addr_wrap *);
182 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
183 struct pf_ruleset *);
184 static void pf_delete_rule_by_owner(char *, u_int32_t);
185 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
186 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
187 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
188 int, struct pf_rule **);
189
190 #define PF_CDEV_MAJOR (-1)
191
/*
 * Character-device switch for the pf control devices (/dev/pf and
 * /dev/pfm).  Only open, close and ioctl are implemented; every other
 * entry point is stubbed with the eno_* error routines.
 */
static struct cdevsw pf_cdevsw = {
	.d_open = pfopen,
	.d_close = pfclose,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = pfioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};
208
209 static void pf_attach_hooks(void);
210 #if 0
211 /* currently unused along with pfdetach() */
212 static void pf_detach_hooks(void);
213 #endif
214
215 /*
216 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
217 * and used in pf_af_hook() for performance optimization, such that packets
218 * will enter pf_test() or pf_test6() only when PF is running.
219 */
220 int pf_is_enabled = 0;
221
222 u_int32_t pf_hash_seed;
223 int16_t pf_nat64_configured = 0;
224
225 /*
226 * These are the pf enabled reference counting variables
227 */
228 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
229
230 static u_int64_t pf_enabled_ref_count;
231 static u_int32_t nr_tokens = 0;
232 static u_int64_t pffwrules;
233 static u_int32_t pfdevcnt;
234
235 SLIST_HEAD(list_head, pfioc_kernel_token);
236 static struct list_head token_list_head;
237
238 struct pf_rule pf_default_rule;
239
240 #define TAGID_MAX 50000
241 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
242 TAILQ_HEAD_INITIALIZER(pf_tags);
243
244 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
245 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
246 #endif
247 static u_int16_t tagname2tag(struct pf_tags *, char *);
248 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
249 static void tag_unref(struct pf_tags *, u_int16_t);
250 static int pf_rtlabel_add(struct pf_addr_wrap *);
251 static void pf_rtlabel_remove(struct pf_addr_wrap *);
252 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
253
254 #if INET
255 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
256 struct ip_fw_args *);
257 #endif /* INET */
258 #if INET6
259 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
260 struct ip_fw_args *);
261 #endif /* INET6 */
262
/* debug printf, gated on the global pf debug level */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit).
 * A heap-allocated union holds either layout; the `p64' flag (in scope at
 * each use site) selects which member is copied in/out of the user buffer.
 */
#define PFIOCX_STRUCT_DECL(s) \
	struct { \
		union { \
			struct s##_32 _s##_32; \
			struct s##_64 _s##_64; \
		} _u; \
	} *s##_un = NULL \

#define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
	VERIFY(s##_un == NULL); \
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
	if (s##_un == NULL) { \
		_action \
	} else { \
		if (p64) \
			bcopy(a, &s##_un->_u._s##_64, \
			    sizeof (struct s##_64)); \
		else \
			bcopy(a, &s##_un->_u._s##_32, \
			    sizeof (struct s##_32)); \
	} \
}

#define PFIOCX_STRUCT_END(s, a) { \
	VERIFY(s##_un != NULL); \
	if (p64) \
		bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
	else \
		bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
	_FREE(s##_un, M_TEMP); \
	s##_un = NULL; \
}

#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)

/*
 * Helper macros for regular (size-invariant) ioctl structures: copy the
 * user buffer into a zeroed heap shadow on BEGIN and back out on END.
 */
#define PFIOC_STRUCT_BEGIN(a, v, _action) { \
	VERIFY((v) == NULL); \
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
	if ((v) == NULL) { \
		_action \
	} else { \
		bcopy(a, v, sizeof (*(v))); \
	} \
}

#define PFIOC_STRUCT_END(v, a) { \
	VERIFY((v) != NULL); \
	bcopy(v, a, sizeof (*(v))); \
	_FREE(v, M_TEMP); \
	(v) = NULL; \
}

/*
 * NOTE(review): these expand `s##_un' exactly like the PFIOCX_ variants
 * above — presumably leftovers from the sized-struct family; confirm
 * they are only used with PFIOCX_STRUCT_DECL unions.
 */
#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
326
327 static lck_attr_t *pf_perim_lock_attr;
328 static lck_grp_t *pf_perim_lock_grp;
329 static lck_grp_attr_t *pf_perim_lock_grp_attr;
330
331 static lck_attr_t *pf_lock_attr;
332 static lck_grp_t *pf_lock_grp;
333 static lck_grp_attr_t *pf_lock_grp_attr;
334
335 struct thread *pf_purge_thread;
336
337 extern void pfi_kifaddr_update(void *);
338
339 /* pf enable ref-counting helper functions */
340 static u_int64_t generate_token(struct proc *);
341 static int remove_token(struct pfioc_remove_token *);
342 static void invalidate_all_tokens(void);
343
/*
 * Allocate and register a new pf enable-reference token for process `p'.
 * The value handed back to userland is the tracking structure's kernel
 * address obfuscated via VM_KERNEL_ADDRPERM().  Returns the token value,
 * or 0 when the token limit is reached or allocation fails.  Caller must
 * hold pf_lock (asserted below).
 */
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	/* cap the number of outstanding tokens */
	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
		return 0;
	}

	/* NOTE(review): M_WAITOK allocation while pf_lock is held may block */
	new_token = _MALLOC(sizeof(struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK | M_ZERO);

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		os_log_error(OS_LOG_DEFAULT, "%s: unable to allocate pf token structure!", __func__);
		return 0;
	}

	/* the obfuscated pointer doubles as the unique token value */
	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof(new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return token_value;
}
379
380 static int
381 remove_token(struct pfioc_remove_token *tok)
382 {
383 struct pfioc_kernel_token *entry, *tmp;
384
385 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
386
387 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
388 if (tok->token_value == entry->token.token_value) {
389 SLIST_REMOVE(&token_list_head, entry,
390 pfioc_kernel_token, next);
391 _FREE(entry, M_TEMP);
392 nr_tokens--;
393 return 0; /* success */
394 }
395 }
396
397 printf("pf : remove failure\n");
398 return ESRCH; /* failure */
399 }
400
401 static void
402 invalidate_all_tokens(void)
403 {
404 struct pfioc_kernel_token *entry, *tmp;
405
406 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
407
408 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
409 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
410 _FREE(entry, M_TEMP);
411 }
412
413 nr_tokens = 0;
414 }
415
/*
 * One-time PF subsystem initialization at network-stack bring-up: locks,
 * memory pools, lookup trees, the default pass rule and its timeout
 * table, the purge kernel thread, the /dev/pf and /dev/pfm device nodes,
 * and the packet-filter hooks.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* perimeter rwlock plus the main pf mutex */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* fixed-size pools for rules, states, source nodes, pool addresses */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* lower the table-entry ceiling on small-memory (<= 256 MB) systems */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* service-class to index mappings must agree with the mbuf layer */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: main control node; /dev/pfm: exclusive-open variant */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
539
#if 0
/*
 * Full PF teardown: stop the purge thread, flush rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and the
 * subordinate subsystems.  Compiled out — PF is never detached once
 * attached — but kept as a reference for the required teardown order.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets: commit an empty transaction per ruleset */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark every state purgeable, then reap them all */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: drop state back-pointers before expiring */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
617
618 static int
619 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
620 {
621 #pragma unused(flags, fmt, p)
622 if (minor(dev) >= PFDEV_MAX) {
623 return ENXIO;
624 }
625
626 if (minor(dev) == PFDEV_PFM) {
627 lck_mtx_lock(pf_lock);
628 if (pfdevcnt != 0) {
629 lck_mtx_unlock(pf_lock);
630 return EBUSY;
631 }
632 pfdevcnt++;
633 lck_mtx_unlock(pf_lock);
634 }
635 return 0;
636 }
637
638 static int
639 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
640 {
641 #pragma unused(flags, fmt, p)
642 if (minor(dev) >= PFDEV_MAX) {
643 return ENXIO;
644 }
645
646 if (minor(dev) == PFDEV_PFM) {
647 lck_mtx_lock(pf_lock);
648 VERIFY(pfdevcnt > 0);
649 pfdevcnt--;
650 lck_mtx_unlock(pf_lock);
651 }
652 return 0;
653 }
654
655 static struct pf_pool *
656 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
657 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
658 u_int8_t check_ticket)
659 {
660 struct pf_ruleset *ruleset;
661 struct pf_rule *rule;
662 int rs_num;
663
664 ruleset = pf_find_ruleset(anchor);
665 if (ruleset == NULL) {
666 return NULL;
667 }
668 rs_num = pf_get_ruleset_number(rule_action);
669 if (rs_num >= PF_RULESET_MAX) {
670 return NULL;
671 }
672 if (active) {
673 if (check_ticket && ticket !=
674 ruleset->rules[rs_num].active.ticket) {
675 return NULL;
676 }
677 if (r_last) {
678 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
679 pf_rulequeue);
680 } else {
681 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
682 }
683 } else {
684 if (check_ticket && ticket !=
685 ruleset->rules[rs_num].inactive.ticket) {
686 return NULL;
687 }
688 if (r_last) {
689 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
690 pf_rulequeue);
691 } else {
692 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
693 }
694 }
695 if (!r_last) {
696 while ((rule != NULL) && (rule->nr != rule_number)) {
697 rule = TAILQ_NEXT(rule, entries);
698 }
699 }
700 if (rule == NULL) {
701 return NULL;
702 }
703
704 return &rule->rpool;
705 }
706
707 static void
708 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
709 {
710 struct pf_pooladdr *mv_pool_pa;
711
712 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
713 TAILQ_REMOVE(poola, mv_pool_pa, entries);
714 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
715 }
716 }
717
718 static void
719 pf_empty_pool(struct pf_palist *poola)
720 {
721 struct pf_pooladdr *empty_pool_pa;
722
723 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
724 pfi_dynaddr_remove(&empty_pool_pa->addr);
725 pf_tbladdr_remove(&empty_pool_pa->addr);
726 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
727 TAILQ_REMOVE(poola, empty_pool_pa, entries);
728 pool_put(&pf_pooladdr_pl, empty_pool_pa);
729 }
730 }
731
/*
 * Detach `rule' from `rulequeue' (when non-NULL) and free it once it is
 * fully unreferenced.  A rule still holding states or source nodes, or
 * one that was never detached from a queue, is left in place; the final
 * release happens later when the last reference goes away.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached (tested below) */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced or still queued: defer the actual free */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not detached above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
775
/*
 * Return the numeric id for `tagname', allocating a new entry when the
 * name is unknown.  An existing entry simply gains a reference.  The
 * list is kept sorted by tag id, so scanning for the first gap yields
 * the lowest free id.  Returns 0 when no id is available (would exceed
 * TAGID_MAX) or allocation fails.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	/* existing entry: bump the reference and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* list is sorted: loop stops at the first gap in the ids */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > TAGID_MAX) {
		return 0;
	}

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO);
	if (tag == NULL) {
		return 0;
	}
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else { /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
823
824 static void
825 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
826 {
827 struct pf_tagname *tag;
828
829 TAILQ_FOREACH(tag, head, entries)
830 if (tag->tag == tagid) {
831 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
832 return;
833 }
834 }
835
836 static void
837 tag_unref(struct pf_tags *head, u_int16_t tag)
838 {
839 struct pf_tagname *p, *next;
840
841 if (tag == 0) {
842 return;
843 }
844
845 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
846 next = TAILQ_NEXT(p, entries);
847 if (tag == p->tag) {
848 if (--p->ref == 0) {
849 TAILQ_REMOVE(head, p, entries);
850 _FREE(p, M_TEMP);
851 }
852 break;
853 }
854 }
855 }
856
/*
 * Public wrapper: map a tag name to its id in the global pf_tags list,
 * allocating / refcounting as needed (see tagname2tag()).
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
862
/*
 * Public wrapper: copy the name of `tagid' from the global pf_tags list
 * into `p'; `p' is untouched when the tag is unknown.
 */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
868
869 void
870 pf_tag_ref(u_int16_t tag)
871 {
872 struct pf_tagname *t;
873
874 TAILQ_FOREACH(t, &pf_tags, entries)
875 if (t->tag == tag) {
876 break;
877 }
878 if (t != NULL) {
879 t->ref++;
880 }
881 }
882
/*
 * Public wrapper: drop one reference on `tag' in the global pf_tags
 * list, freeing its entry when the count reaches zero.
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
888
/* Route labels are not supported on this platform; always succeeds. */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
895
/* Route labels are not supported on this platform; no-op. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
901
/* Route labels are not supported on this platform; no-op. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
907
908 static int
909 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
910 {
911 struct pf_ruleset *rs;
912 struct pf_rule *rule;
913
914 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
915 return EINVAL;
916 }
917 rs = pf_find_or_create_ruleset(anchor);
918 if (rs == NULL) {
919 return EINVAL;
920 }
921 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
922 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
923 rs->rules[rs_num].inactive.rcount--;
924 }
925 *ticket = ++rs->rules[rs_num].inactive.ticket;
926 rs->rules[rs_num].inactive.open = 1;
927 return 0;
928 }
929
930 static int
931 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
932 {
933 struct pf_ruleset *rs;
934 struct pf_rule *rule;
935
936 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
937 return EINVAL;
938 }
939 rs = pf_find_ruleset(anchor);
940 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
941 rs->rules[rs_num].inactive.ticket != ticket) {
942 return 0;
943 }
944 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
945 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
946 rs->rules[rs_num].inactive.rcount--;
947 }
948 rs->rules[rs_num].inactive.open = 0;
949 return 0;
950 }
951
/*
 * Helpers for folding rule fields into the MD5 ruleset checksum.
 * PF_MD5_UPD hashes a fixed-size member, PF_MD5_UPD_STR hashes a
 * NUL-terminated string, and the HTONL/HTONS variants hash integers in
 * network byte order (via the scratch variable `stor') so the digest
 * does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
967
/*
 * Fold a rule address (and, for TCP/UDP, its port range and operator)
 * into the ruleset MD5 checksum.  Only the fields relevant to the
 * address type are hashed.  The field order must stay stable: the
 * digest is used by pf_setup_pfsync_matching() for ruleset matching.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only matter for protocols that have them */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1004
/*
 * Fold an entire rule into the ruleset MD5 checksum.  Multi-byte
 * integers go through the HTONL/HTONS helpers (scratch vars x/y) so
 * the digest is byte-order independent.  The set and order of hashed
 * fields must remain stable, since the digest is compared externally
 * (see pf_setup_pfsync_matching()).
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1043
/*
 * Commit a rule transaction: swap the staged (inactive) rule list of
 * `anchor'/`rs_num' into the active slot, then purge the previous
 * active rules.  The caller's `ticket' must match the open transaction.
 * Returns EINVAL on a bad ruleset number, EBUSY on a ticket/transaction
 * mismatch, or the error from pf_setup_pfsync_matching().  Caller must
 * hold pf_lock (asserted below).
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* outgoing PFM-owned rules no longer count against pffwrules */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	if (rs->rules[rs_num].inactive.ptr_array) {
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	}
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1116
/*
 * Copy a rule handed in from userland into a kernel rule and sanitize
 * it: NUL-terminate every string, stamp the creator's uid/pid, and
 * clear every kernel pointer / list linkage so stale userland bits can
 * never be dereferenced in the kernel.  `minordev' identifies which pf
 * device node the rule arrived through.
 */
1117 static void
1118 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1119 int minordev)
1120 {
1121 bcopy(src, dst, sizeof(struct pf_rule));
1122
/* force NUL termination on all user-supplied strings */
1123 dst->label[sizeof(dst->label) - 1] = '\0';
1124 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1125 dst->qname[sizeof(dst->qname) - 1] = '\0';
1126 dst->pqname[sizeof(dst->pqname) - 1] = '\0';
1127 dst->tagname[sizeof(dst->tagname) - 1] = '\0';
1128 dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
1129 dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
1130
/* record who created this rule */
1131 dst->cuid = kauth_cred_getuid(p->p_ucred);
1132 dst->cpid = p->p_pid;
1133
/* kernel pointers must never be taken from userland */
1134 dst->anchor = NULL;
1135 dst->kif = NULL;
1136 dst->overload_tbl = NULL;
1137
1138 TAILQ_INIT(&dst->rpool.list);
1139 dst->rpool.cur = NULL;
1140
1141 /* initialize refcounting */
1142 dst->states = 0;
1143 dst->src_nodes = 0;
1144
1145 dst->entries.tqe_prev = NULL;
1146 dst->entries.tqe_next = NULL;
/* rules injected via the PFM device are tracked with PFRULE_PFM */
1147 if ((uint8_t)minordev == PFDEV_PFM) {
1148 dst->rule_flag |= PFRULE_PFM;
1149 }
1150 }
1151
1152 static void
1153 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1154 {
1155 bcopy(src, dst, sizeof(struct pf_rule));
1156
1157 dst->anchor = NULL;
1158 dst->kif = NULL;
1159 dst->overload_tbl = NULL;
1160
1161 TAILQ_INIT(&dst->rpool.list);
1162 dst->rpool.cur = NULL;
1163
1164 dst->entries.tqe_prev = NULL;
1165 dst->entries.tqe_next = NULL;
1166 }
1167
/*
 * Serialize a kernel state entry (state key `sk' plus state `s') into
 * the pfsync wire/export representation `sp'.  Absolute timestamps are
 * converted to relative seconds ("creation" = age, "expire" = seconds
 * until expiry, clamped at 0).  Rules are exported by number, with
 * (unsigned)-1 marking "no rule".
 */
1168 static void
1169 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
1170 struct pf_state *s)
1171 {
1172 uint64_t secs = pf_time_second();
1173 bzero(sp, sizeof(struct pfsync_state));
1174
1175 /* copy from state key */
1176 sp->lan.addr = sk->lan.addr;
1177 sp->lan.xport = sk->lan.xport;
1178 sp->gwy.addr = sk->gwy.addr;
1179 sp->gwy.xport = sk->gwy.xport;
1180 sp->ext_lan.addr = sk->ext_lan.addr;
1181 sp->ext_lan.xport = sk->ext_lan.xport;
1182 sp->ext_gwy.addr = sk->ext_gwy.addr;
1183 sp->ext_gwy.xport = sk->ext_gwy.xport;
1184 sp->proto_variant = sk->proto_variant;
1185 sp->tag = s->tag;
1186 sp->proto = sk->proto;
1187 sp->af_lan = sk->af_lan;
1188 sp->af_gwy = sk->af_gwy;
1189 sp->direction = sk->direction;
1190 sp->flowhash = sk->flowhash;
1191
1192 /* copy from state */
1193 memcpy(&sp->id, &s->id, sizeof(sp->id));
1194 sp->creatorid = s->creatorid;
1195 strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
1196 pf_state_peer_to_pfsync(&s->src, &sp->src);
1197 pf_state_peer_to_pfsync(&s->dst, &sp->dst);
1198
/* export rules by number; (unsigned)-1 means "no such rule" */
1199 sp->rule = s->rule.ptr->nr;
1200 sp->nat_rule = (s->nat_rule.ptr == NULL) ?
1201 (unsigned)-1 : s->nat_rule.ptr->nr;
1202 sp->anchor = (s->anchor.ptr == NULL) ?
1203 (unsigned)-1 : s->anchor.ptr->nr;
1204
1205 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
1206 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
1207 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
1208 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
/* age of the state in seconds */
1209 sp->creation = secs - s->creation;
1210 sp->expire = pf_state_expires(s);
1211 sp->log = s->log;
1212 sp->allow_opts = s->allow_opts;
1213 sp->timeout = s->timeout;
1214
1215 if (s->src_node) {
1216 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
1217 }
1218 if (s->nat_src_node) {
1219 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
1220 }
1221
/* convert absolute expiry to "seconds from now", clamped at zero */
1222 if (sp->expire > secs) {
1223 sp->expire -= secs;
1224 } else {
1225 sp->expire = 0;
1226 }
1227 }
1228
/*
 * Deserialize a pfsync/export state representation `sp' into a kernel
 * state key `sk' and state `s'.  The flowhash is recomputed locally
 * rather than trusted from the import, counters are reset, and rule
 * pointers default to the catch-all pf_default_rule.
 */
1229 static void
1230 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
1231 struct pf_state *s)
1232 {
1233 /* copy to state key */
1234 sk->lan.addr = sp->lan.addr;
1235 sk->lan.xport = sp->lan.xport;
1236 sk->gwy.addr = sp->gwy.addr;
1237 sk->gwy.xport = sp->gwy.xport;
1238 sk->ext_lan.addr = sp->ext_lan.addr;
1239 sk->ext_lan.xport = sp->ext_lan.xport;
1240 sk->ext_gwy.addr = sp->ext_gwy.addr;
1241 sk->ext_gwy.xport = sp->ext_gwy.xport;
1242 sk->proto_variant = sp->proto_variant;
1243 s->tag = sp->tag;
1244 sk->proto = sp->proto;
1245 sk->af_lan = sp->af_lan;
1246 sk->af_gwy = sp->af_gwy;
1247 sk->direction = sp->direction;
/* never trust an imported flowhash; derive it from the key itself */
1248 sk->flowhash = pf_calc_state_key_flowhash(sk);
1249
1250 /* copy to state */
1251 memcpy(&s->id, &sp->id, sizeof(sp->id));
1252 s->creatorid = sp->creatorid;
1253 pf_state_peer_from_pfsync(&sp->src, &s->src);
1254 pf_state_peer_from_pfsync(&sp->dst, &s->dst);
1255
1256 s->rule.ptr = &pf_default_rule;
1257 s->nat_rule.ptr = NULL;
1258 s->anchor.ptr = NULL;
1259 s->rt_kif = NULL;
1260 s->creation = pf_time_second();
1261 s->expire = pf_time_second();
/*
 * Rebase the remaining lifetime onto the local clock.
 * NOTE(review): sp->timeout indexes pf_default_rule.timeout[] here —
 * presumably validated against PFTM_MAX by the caller; confirm, since
 * sp originates from userland/network input.
 */
1262 if (sp->expire > 0) {
1263 s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
1264 }
/* imported states start with fresh local statistics */
1265 s->pfsync_time = 0;
1266 s->packets[0] = s->packets[1] = 0;
1267 s->bytes[0] = s->bytes[1] = 0;
1268 }
1269
1270 static void
1271 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1272 {
1273 bcopy(src, dst, sizeof(struct pf_pooladdr));
1274
1275 dst->entries.tqe_prev = NULL;
1276 dst->entries.tqe_next = NULL;
1277 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1278 dst->kif = NULL;
1279 }
1280
1281 static void
1282 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1283 {
1284 bcopy(src, dst, sizeof(struct pf_pooladdr));
1285
1286 dst->entries.tqe_prev = NULL;
1287 dst->entries.tqe_next = NULL;
1288 dst->kif = NULL;
1289 }
1290
/*
 * Prepare the inactive rulesets for commit: (re)build the per-ruleset
 * ptr_array lookup tables (rule number -> rule pointer) and compute an
 * MD5 digest over all rules, storing it in pf_status.pf_chksum so
 * pfsync peers can verify they run identical rulesets.
 *
 * Returns 0 on success or ENOMEM if a lookup table cannot be allocated.
 * Scrub rulesets are deliberately excluded from the digest.
 */
1291 static int
1292 pf_setup_pfsync_matching(struct pf_ruleset *rs)
1293 {
1294 MD5_CTX ctx;
1295 struct pf_rule *rule;
1296 int rs_cnt;
1297 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1298
1299 MD5Init(&ctx);
1300 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1301 /* XXX PF_RULESET_SCRUB as well? */
1302 if (rs_cnt == PF_RULESET_SCRUB) {
1303 continue;
1304 }
1305
/* discard any stale lookup table before rebuilding it */
1306 if (rs->rules[rs_cnt].inactive.ptr_array) {
1307 _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1308 }
1309 rs->rules[rs_cnt].inactive.ptr_array = NULL;
1310
1311 if (rs->rules[rs_cnt].inactive.rcount) {
1312 rs->rules[rs_cnt].inactive.ptr_array =
1313 _MALLOC(sizeof(caddr_t) *
1314 rs->rules[rs_cnt].inactive.rcount,
1315 M_TEMP, M_WAITOK);
1316
1317 if (!rs->rules[rs_cnt].inactive.ptr_array) {
1318 return ENOMEM;
1319 }
1320 }
1321
/* hash each rule and index it by its rule number */
1322 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1323 entries) {
1324 pf_hash_rule(&ctx, rule);
1325 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1326 }
1327 }
1328
/* publish the ruleset checksum for pfsync peer comparison */
1329 MD5Final(digest, &ctx);
1330 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
1331 return 0;
1332 }
1333
/*
 * Enable pf: flip the global enable flags, stamp the start time, seed
 * the state-id generator on first use, and kick the purge thread awake.
 * Caller must hold pf_lock and guarantee pf is currently disabled.
 */
1334 static void
1335 pf_start(void)
1336 {
1337 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1338
1339 VERIFY(pf_is_enabled == 0);
1340
1341 pf_is_enabled = 1;
1342 pf_status.running = 1;
1343 pf_status.since = pf_calendar_time_second();
/* seed the 64-bit state id counter once, with the time in the high bits */
1344 if (pf_status.stateid == 0) {
1345 pf_status.stateid = pf_time_second();
1346 pf_status.stateid = pf_status.stateid << 32;
1347 }
/* wake the purge thread so it starts its periodic expiry work */
1348 wakeup(pf_purge_thread_fn);
1349 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1350 }
1351
/*
 * Disable pf: clear the global enable flags, stamp the stop time, and
 * wake the purge thread so it can notice the state change.  Caller
 * must hold pf_lock and guarantee pf is currently enabled.
 */
1352 static void
1353 pf_stop(void)
1354 {
1355 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1356
1357 VERIFY(pf_is_enabled);
1358
1359 pf_status.running = 0;
1360 pf_is_enabled = 0;
1361 pf_status.since = pf_calendar_time_second();
1362 wakeup(pf_purge_thread_fn);
1363 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1364 }
1365
/*
 * Top-level pf character-device ioctl handler.
 *
 * Access control happens in three layers before any command runs:
 *   1. the caller must be superuser (EPERM otherwise);
 *   2. at securelevel > 1 only a read-mostly whitelist of commands is
 *      allowed (table-modifying commands pass only with PFR_FLAG_DUMMY);
 *   3. without FWRITE on the descriptor, only read-style commands are
 *      allowed (EACCES otherwise).
 * Then pf_perim_lock is taken exclusive (FWRITE) or shared, plus
 * pf_lock, and the command is dispatched.  Most commands bounce their
 * argument through the PFIOC/PFIOCX copy helpers and delegate to a
 * pfioctl_ioc_* worker; a few small/simple ones are handled inline.
 *
 * Returns 0 or an errno (EPERM/EACCES from the gates, ENODEV for an
 * unknown command, or whatever the worker returns).
 */
1366 static int
1367 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1368 {
1369 #pragma unused(dev)
1370 int p64 = proc_is64bit(p);
1371 int error = 0;
1372 int minordev = minor(dev);
1373
/* only the superuser may talk to pf at all */
1374 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
1375 return EPERM;
1376 }
1377
1378 /* XXX keep in sync with switch() below */
1379 if (securelevel > 1) {
1380 switch (cmd) {
1381 case DIOCGETRULES:
1382 case DIOCGETRULE:
1383 case DIOCGETADDRS:
1384 case DIOCGETADDR:
1385 case DIOCGETSTATE:
1386 case DIOCSETSTATUSIF:
1387 case DIOCGETSTATUS:
1388 case DIOCCLRSTATUS:
1389 case DIOCNATLOOK:
1390 case DIOCSETDEBUG:
1391 case DIOCGETSTATES:
1392 case DIOCINSERTRULE:
1393 case DIOCDELETERULE:
1394 case DIOCGETTIMEOUT:
1395 case DIOCCLRRULECTRS:
1396 case DIOCGETLIMIT:
1397 case DIOCGETALTQS:
1398 case DIOCGETALTQ:
1399 case DIOCGETQSTATS:
1400 case DIOCGETRULESETS:
1401 case DIOCGETRULESET:
1402 case DIOCRGETTABLES:
1403 case DIOCRGETTSTATS:
1404 case DIOCRCLRTSTATS:
1405 case DIOCRCLRADDRS:
1406 case DIOCRADDADDRS:
1407 case DIOCRDELADDRS:
1408 case DIOCRSETADDRS:
1409 case DIOCRGETADDRS:
1410 case DIOCRGETASTATS:
1411 case DIOCRCLRASTATS:
1412 case DIOCRTSTADDRS:
1413 case DIOCOSFPGET:
1414 case DIOCGETSRCNODES:
1415 case DIOCCLRSRCNODES:
1416 case DIOCIGETIFACES:
1417 case DIOCGIFSPEED:
1418 case DIOCSETIFFLAG:
1419 case DIOCCLRIFFLAG:
1420 break;
1421 case DIOCRCLRTABLES:
1422 case DIOCRADDTABLES:
1423 case DIOCRDELTABLES:
1424 case DIOCRSETTFLAGS: {
1425 int pfrio_flags;
1426
/* bcopy: addr may be unaligned for direct member access */
1427 bcopy(&((struct pfioc_table *)(void *)addr)->
1428 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1429
1430 if (pfrio_flags & PFR_FLAG_DUMMY) {
1431 break; /* dummy operation ok */
1432 }
1433 return EPERM;
1434 }
1435 default:
1436 return EPERM;
1437 }
1438 }
1439
/* without write access, only read-style commands are permitted */
1440 if (!(flags & FWRITE)) {
1441 switch (cmd) {
1442 case DIOCSTART:
1443 case DIOCSTARTREF:
1444 case DIOCSTOP:
1445 case DIOCSTOPREF:
1446 case DIOCGETSTARTERS:
1447 case DIOCGETRULES:
1448 case DIOCGETADDRS:
1449 case DIOCGETADDR:
1450 case DIOCGETSTATE:
1451 case DIOCGETSTATUS:
1452 case DIOCGETSTATES:
1453 case DIOCINSERTRULE:
1454 case DIOCDELETERULE:
1455 case DIOCGETTIMEOUT:
1456 case DIOCGETLIMIT:
1457 case DIOCGETALTQS:
1458 case DIOCGETALTQ:
1459 case DIOCGETQSTATS:
1460 case DIOCGETRULESETS:
1461 case DIOCGETRULESET:
1462 case DIOCNATLOOK:
1463 case DIOCRGETTABLES:
1464 case DIOCRGETTSTATS:
1465 case DIOCRGETADDRS:
1466 case DIOCRGETASTATS:
1467 case DIOCRTSTADDRS:
1468 case DIOCOSFPGET:
1469 case DIOCGETSRCNODES:
1470 case DIOCIGETIFACES:
1471 case DIOCGIFSPEED:
1472 break;
1473 case DIOCRCLRTABLES:
1474 case DIOCRADDTABLES:
1475 case DIOCRDELTABLES:
1476 case DIOCRCLRTSTATS:
1477 case DIOCRCLRADDRS:
1478 case DIOCRADDADDRS:
1479 case DIOCRDELADDRS:
1480 case DIOCRSETADDRS:
1481 case DIOCRSETTFLAGS: {
1482 int pfrio_flags;
1483
1484 bcopy(&((struct pfioc_table *)(void *)addr)->
1485 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1486
1487 if (pfrio_flags & PFR_FLAG_DUMMY) {
1488 flags |= FWRITE; /* need write lock for dummy */
1489 break; /* dummy operation ok */
1490 }
1491 return EACCES;
1492 }
1493 case DIOCGETRULE: {
1494 u_int32_t action;
1495
1496 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1497 &action, sizeof(action));
1498
/* PF_GET_CLR_CNTR clears counters, i.e. writes -- refuse it */
1499 if (action == PF_GET_CLR_CNTR) {
1500 return EACCES;
1501 }
1502 break;
1503 }
1504 default:
1505 return EACCES;
1506 }
1507 }
1508
/* writers serialize exclusively; readers may share the perimeter */
1509 if (flags & FWRITE) {
1510 lck_rw_lock_exclusive(pf_perim_lock);
1511 } else {
1512 lck_rw_lock_shared(pf_perim_lock);
1513 }
1514
1515 lck_mtx_lock(pf_lock);
1516
1517 switch (cmd) {
1518 case DIOCSTART:
1519 if (pf_status.running) {
1520 /*
1521 * Increment the reference for a simple -e enable, so
1522 * that even if other processes drop their references,
1523 * pf will still be available to processes that turned
1524 * it on without taking a reference
1525 */
1526 if (nr_tokens == pf_enabled_ref_count) {
1527 pf_enabled_ref_count++;
1528 VERIFY(pf_enabled_ref_count != 0);
1529 }
1530 error = EEXIST;
1531 } else if (pf_purge_thread == NULL) {
1532 error = ENOMEM;
1533 } else {
1534 pf_start();
1535 pf_enabled_ref_count++;
1536 VERIFY(pf_enabled_ref_count != 0);
1537 }
1538 break;
1539
1540 case DIOCSTARTREF: /* u_int64_t */
1541 if (pf_purge_thread == NULL) {
1542 error = ENOMEM;
1543 } else {
1544 u_int64_t token;
1545
1546 /* small enough to be on stack */
1547 if ((token = generate_token(p)) != 0) {
1548 if (pf_is_enabled == 0) {
1549 pf_start();
1550 }
1551 pf_enabled_ref_count++;
1552 VERIFY(pf_enabled_ref_count != 0);
1553 } else {
1554 error = ENOMEM;
1555 DPFPRINTF(PF_DEBUG_URGENT,
1556 ("pf: unable to generate token\n"));
1557 }
/* the token (or 0 on failure) is returned to the caller */
1558 bcopy(&token, addr, sizeof(token));
1559 }
1560 break;
1561
1562 case DIOCSTOP:
1563 if (!pf_status.running) {
1564 error = ENOENT;
1565 } else {
/* hard stop: drop every outstanding enable reference */
1566 pf_stop();
1567 pf_enabled_ref_count = 0;
1568 invalidate_all_tokens();
1569 }
1570 break;
1571
1572 case DIOCSTOPREF: /* struct pfioc_remove_token */
1573 if (!pf_status.running) {
1574 error = ENOENT;
1575 } else {
1576 struct pfioc_remove_token pfrt;
1577
1578 /* small enough to be on stack */
1579 bcopy(addr, &pfrt, sizeof(pfrt));
1580 if ((error = remove_token(&pfrt)) == 0) {
1581 VERIFY(pf_enabled_ref_count != 0);
1582 pf_enabled_ref_count--;
1583 /* return currently held references */
1584 pfrt.refcount = pf_enabled_ref_count;
1585 DPFPRINTF(PF_DEBUG_MISC,
1586 ("pf: enabled refcount decremented\n"));
1587 } else {
1588 error = EINVAL;
1589 DPFPRINTF(PF_DEBUG_URGENT,
1590 ("pf: token mismatch\n"));
1591 }
1592 bcopy(&pfrt, addr, sizeof(pfrt));
1593
/* last reference gone: shut pf down */
1594 if (error == 0 && pf_enabled_ref_count == 0) {
1595 pf_stop();
1596 }
1597 }
1598 break;
1599
1600 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1601 PFIOCX_STRUCT_DECL(pfioc_tokens);
1602
1603 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
1604 error = pfioctl_ioc_tokens(cmd,
1605 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1606 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1607 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1608 break;
1609 }
1610
1611 case DIOCADDRULE: /* struct pfioc_rule */
1612 case DIOCGETRULES: /* struct pfioc_rule */
1613 case DIOCGETRULE: /* struct pfioc_rule */
1614 case DIOCCHANGERULE: /* struct pfioc_rule */
1615 case DIOCINSERTRULE: /* struct pfioc_rule */
1616 case DIOCDELETERULE: { /* struct pfioc_rule */
1617 struct pfioc_rule *pr = NULL;
1618
1619 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1620 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1621 PFIOC_STRUCT_END(pr, addr);
1622 break;
1623 }
1624
1625 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1626 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1627 struct pfioc_state_kill *psk = NULL;
1628
1629 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; );
1630 error = pfioctl_ioc_state_kill(cmd, psk, p);
1631 PFIOC_STRUCT_END(psk, addr);
1632 break;
1633 }
1634
1635 case DIOCADDSTATE: /* struct pfioc_state */
1636 case DIOCGETSTATE: { /* struct pfioc_state */
1637 struct pfioc_state *ps = NULL;
1638
1639 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; );
1640 error = pfioctl_ioc_state(cmd, ps, p);
1641 PFIOC_STRUCT_END(ps, addr);
1642 break;
1643 }
1644
1645 case DIOCGETSTATES: { /* struct pfioc_states */
1646 PFIOCX_STRUCT_DECL(pfioc_states);
1647
1648 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; );
1649 error = pfioctl_ioc_states(cmd,
1650 PFIOCX_STRUCT_ADDR32(pfioc_states),
1651 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1652 PFIOCX_STRUCT_END(pfioc_states, addr);
1653 break;
1654 }
1655
1656 case DIOCGETSTATUS: { /* struct pf_status */
1657 struct pf_status *s = NULL;
1658
/* note: source is the global pf_status, not the user buffer */
1659 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; );
1660 pfi_update_status(s->ifname, s);
1661 PFIOC_STRUCT_END(s, addr);
1662 break;
1663 }
1664
1665 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1666 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1667
1668 /* OK for unaligned accesses */
1669 if (pi->ifname[0] == 0) {
1670 bzero(pf_status.ifname, IFNAMSIZ);
1671 break;
1672 }
1673 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1674 break;
1675 }
1676
1677 case DIOCCLRSTATUS: {
/* reset all counters but keep configuration intact */
1678 bzero(pf_status.counters, sizeof(pf_status.counters));
1679 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1680 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1681 pf_status.since = pf_calendar_time_second();
1682 if (*pf_status.ifname) {
1683 pfi_update_status(pf_status.ifname, NULL);
1684 }
1685 break;
1686 }
1687
1688 case DIOCNATLOOK: { /* struct pfioc_natlook */
1689 struct pfioc_natlook *pnl = NULL;
1690
1691 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; );
1692 error = pfioctl_ioc_natlook(cmd, pnl, p);
1693 PFIOC_STRUCT_END(pnl, addr);
1694 break;
1695 }
1696
1697 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1698 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1699 struct pfioc_tm pt;
1700
1701 /* small enough to be on stack */
1702 bcopy(addr, &pt, sizeof(pt));
1703 error = pfioctl_ioc_tm(cmd, &pt, p);
1704 bcopy(&pt, addr, sizeof(pt));
1705 break;
1706 }
1707
1708 case DIOCGETLIMIT: /* struct pfioc_limit */
1709 case DIOCSETLIMIT: { /* struct pfioc_limit */
1710 struct pfioc_limit pl;
1711
1712 /* small enough to be on stack */
1713 bcopy(addr, &pl, sizeof(pl));
1714 error = pfioctl_ioc_limit(cmd, &pl, p);
1715 bcopy(&pl, addr, sizeof(pl));
1716 break;
1717 }
1718
1719 case DIOCSETDEBUG: { /* u_int32_t */
1720 bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
1721 break;
1722 }
1723
1724 case DIOCCLRRULECTRS: {
1725 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1726 struct pf_ruleset *ruleset = &pf_main_ruleset;
1727 struct pf_rule *rule;
1728
1729 TAILQ_FOREACH(rule,
1730 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1731 rule->evaluations = 0;
1732 rule->packets[0] = rule->packets[1] = 0;
1733 rule->bytes[0] = rule->bytes[1] = 0;
1734 }
1735 break;
1736 }
1737
1738 case DIOCGIFSPEED: {
1739 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1740 struct pf_ifspeed ps;
1741 struct ifnet *ifp;
1742 u_int64_t baudrate;
1743
1744 if (psp->ifname[0] != '\0') {
1745 /* Can we completely trust user-land? */
1746 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1747 ps.ifname[IFNAMSIZ - 1] = '\0';
1748 ifp = ifunit(ps.ifname);
1749 if (ifp != NULL) {
1750 baudrate = ifp->if_output_bw.max_bw;
1751 bcopy(&baudrate, &psp->baudrate,
1752 sizeof(baudrate));
1753 } else {
1754 error = EINVAL;
1755 }
1756 } else {
1757 error = EINVAL;
1758 }
1759 break;
1760 }
1761
1762 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
1763 case DIOCADDADDR: /* struct pfioc_pooladdr */
1764 case DIOCGETADDRS: /* struct pfioc_pooladdr */
1765 case DIOCGETADDR: /* struct pfioc_pooladdr */
1766 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
1767 struct pfioc_pooladdr *pp = NULL;
1768
1769 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break; )
1770 error = pfioctl_ioc_pooladdr(cmd, pp, p);
1771 PFIOC_STRUCT_END(pp, addr);
1772 break;
1773 }
1774
1775 case DIOCGETRULESETS: /* struct pfioc_ruleset */
1776 case DIOCGETRULESET: { /* struct pfioc_ruleset */
1777 struct pfioc_ruleset *pr = NULL;
1778
1779 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1780 error = pfioctl_ioc_ruleset(cmd, pr, p);
1781 PFIOC_STRUCT_END(pr, addr);
1782 break;
1783 }
1784
1785 case DIOCRCLRTABLES: /* struct pfioc_table */
1786 case DIOCRADDTABLES: /* struct pfioc_table */
1787 case DIOCRDELTABLES: /* struct pfioc_table */
1788 case DIOCRGETTABLES: /* struct pfioc_table */
1789 case DIOCRGETTSTATS: /* struct pfioc_table */
1790 case DIOCRCLRTSTATS: /* struct pfioc_table */
1791 case DIOCRSETTFLAGS: /* struct pfioc_table */
1792 case DIOCRCLRADDRS: /* struct pfioc_table */
1793 case DIOCRADDADDRS: /* struct pfioc_table */
1794 case DIOCRDELADDRS: /* struct pfioc_table */
1795 case DIOCRSETADDRS: /* struct pfioc_table */
1796 case DIOCRGETADDRS: /* struct pfioc_table */
1797 case DIOCRGETASTATS: /* struct pfioc_table */
1798 case DIOCRCLRASTATS: /* struct pfioc_table */
1799 case DIOCRTSTADDRS: /* struct pfioc_table */
1800 case DIOCRINADEFINE: { /* struct pfioc_table */
1801 PFIOCX_STRUCT_DECL(pfioc_table);
1802
1803 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; );
1804 error = pfioctl_ioc_table(cmd,
1805 PFIOCX_STRUCT_ADDR32(pfioc_table),
1806 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
1807 PFIOCX_STRUCT_END(pfioc_table, addr);
1808 break;
1809 }
1810
1811 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
1812 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
1813 struct pf_osfp_ioctl *io = NULL;
1814
1815 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break; );
1816 if (cmd == DIOCOSFPADD) {
1817 error = pf_osfp_add(io);
1818 } else {
1819 VERIFY(cmd == DIOCOSFPGET);
1820 error = pf_osfp_get(io);
1821 }
1822 PFIOC_STRUCT_END(io, addr);
1823 break;
1824 }
1825
1826 case DIOCXBEGIN: /* struct pfioc_trans */
1827 case DIOCXROLLBACK: /* struct pfioc_trans */
1828 case DIOCXCOMMIT: { /* struct pfioc_trans */
1829 PFIOCX_STRUCT_DECL(pfioc_trans);
1830
1831 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break; );
1832 error = pfioctl_ioc_trans(cmd,
1833 PFIOCX_STRUCT_ADDR32(pfioc_trans),
1834 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
1835 PFIOCX_STRUCT_END(pfioc_trans, addr);
1836 break;
1837 }
1838
1839 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
1840 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
1841
1842 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
1843 error = ENOMEM; break; );
1844 error = pfioctl_ioc_src_nodes(cmd,
1845 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
1846 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
1847 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
1848 break;
1849 }
1850
1851 case DIOCCLRSRCNODES: {
1852 struct pf_src_node *n;
1853 struct pf_state *state;
1854
/* detach all states from their source nodes ... */
1855 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1856 state->src_node = NULL;
1857 state->nat_src_node = NULL;
1858 }
/* ... then mark every node expired and purge them */
1859 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
1860 n->expire = 1;
1861 n->states = 0;
1862 }
1863 pf_purge_expired_src_nodes();
1864 pf_status.src_nodes = 0;
1865 break;
1866 }
1867
1868 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
1869 struct pfioc_src_node_kill *psnk = NULL;
1870
1871 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break; );
1872 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
1873 PFIOC_STRUCT_END(psnk, addr);
1874 break;
1875 }
1876
1877 case DIOCSETHOSTID: { /* u_int32_t */
1878 u_int32_t hid;
1879
1880 /* small enough to be on stack */
1881 bcopy(addr, &hid, sizeof(hid));
/* hostid 0 means "pick one at random" */
1882 if (hid == 0) {
1883 pf_status.hostid = random();
1884 } else {
1885 pf_status.hostid = hid;
1886 }
1887 break;
1888 }
1889
1890 case DIOCOSFPFLUSH:
1891 pf_osfp_flush();
1892 break;
1893
1894 case DIOCIGETIFACES: /* struct pfioc_iface */
1895 case DIOCSETIFFLAG: /* struct pfioc_iface */
1896 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
1897 PFIOCX_STRUCT_DECL(pfioc_iface);
1898
1899 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break; );
1900 error = pfioctl_ioc_iface(cmd,
1901 PFIOCX_STRUCT_ADDR32(pfioc_iface),
1902 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
1903 PFIOCX_STRUCT_END(pfioc_iface, addr);
1904 break;
1905 }
1906
1907 default:
1908 error = ENODEV;
1909 break;
1910 }
1911
1912 lck_mtx_unlock(pf_lock);
1913 lck_rw_done(pf_perim_lock);
1914
1915 return error;
1916 }
1917
1918 static int
1919 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1920 struct pfioc_table_64 *io64, struct proc *p)
1921 {
1922 int p64 = proc_is64bit(p);
1923 int error = 0;
1924
1925 if (!p64) {
1926 goto struct32;
1927 }
1928
1929 /*
1930 * 64-bit structure processing
1931 */
1932 switch (cmd) {
1933 case DIOCRCLRTABLES:
1934 if (io64->pfrio_esize != 0) {
1935 error = ENODEV;
1936 break;
1937 }
1938 pfr_table_copyin_cleanup(&io64->pfrio_table);
1939 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1940 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1941 break;
1942
1943 case DIOCRADDTABLES:
1944 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1945 error = ENODEV;
1946 break;
1947 }
1948 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1949 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1950 break;
1951
1952 case DIOCRDELTABLES:
1953 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1954 error = ENODEV;
1955 break;
1956 }
1957 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1958 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1959 break;
1960
1961 case DIOCRGETTABLES:
1962 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1963 error = ENODEV;
1964 break;
1965 }
1966 pfr_table_copyin_cleanup(&io64->pfrio_table);
1967 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
1968 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1969 break;
1970
1971 case DIOCRGETTSTATS:
1972 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
1973 error = ENODEV;
1974 break;
1975 }
1976 pfr_table_copyin_cleanup(&io64->pfrio_table);
1977 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
1978 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1979 break;
1980
1981 case DIOCRCLRTSTATS:
1982 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1983 error = ENODEV;
1984 break;
1985 }
1986 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
1987 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1988 break;
1989
1990 case DIOCRSETTFLAGS:
1991 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1992 error = ENODEV;
1993 break;
1994 }
1995 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
1996 io64->pfrio_setflag, io64->pfrio_clrflag,
1997 &io64->pfrio_nchange, &io64->pfrio_ndel,
1998 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1999 break;
2000
2001 case DIOCRCLRADDRS:
2002 if (io64->pfrio_esize != 0) {
2003 error = ENODEV;
2004 break;
2005 }
2006 pfr_table_copyin_cleanup(&io64->pfrio_table);
2007 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2008 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2009 break;
2010
2011 case DIOCRADDADDRS:
2012 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2013 error = ENODEV;
2014 break;
2015 }
2016 pfr_table_copyin_cleanup(&io64->pfrio_table);
2017 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2018 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2019 PFR_FLAG_USERIOCTL);
2020 break;
2021
2022 case DIOCRDELADDRS:
2023 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2024 error = ENODEV;
2025 break;
2026 }
2027 pfr_table_copyin_cleanup(&io64->pfrio_table);
2028 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2029 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2030 PFR_FLAG_USERIOCTL);
2031 break;
2032
2033 case DIOCRSETADDRS:
2034 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2035 error = ENODEV;
2036 break;
2037 }
2038 pfr_table_copyin_cleanup(&io64->pfrio_table);
2039 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2040 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2041 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2042 PFR_FLAG_USERIOCTL, 0);
2043 break;
2044
2045 case DIOCRGETADDRS:
2046 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2047 error = ENODEV;
2048 break;
2049 }
2050 pfr_table_copyin_cleanup(&io64->pfrio_table);
2051 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2052 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2053 break;
2054
2055 case DIOCRGETASTATS:
2056 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2057 error = ENODEV;
2058 break;
2059 }
2060 pfr_table_copyin_cleanup(&io64->pfrio_table);
2061 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2062 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2063 break;
2064
2065 case DIOCRCLRASTATS:
2066 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2067 error = ENODEV;
2068 break;
2069 }
2070 pfr_table_copyin_cleanup(&io64->pfrio_table);
2071 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2072 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2073 PFR_FLAG_USERIOCTL);
2074 break;
2075
2076 case DIOCRTSTADDRS:
2077 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2078 error = ENODEV;
2079 break;
2080 }
2081 pfr_table_copyin_cleanup(&io64->pfrio_table);
2082 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2083 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2084 PFR_FLAG_USERIOCTL);
2085 break;
2086
2087 case DIOCRINADEFINE:
2088 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2089 error = ENODEV;
2090 break;
2091 }
2092 pfr_table_copyin_cleanup(&io64->pfrio_table);
2093 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2094 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2095 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2096 break;
2097
2098 default:
2099 VERIFY(0);
2100 /* NOTREACHED */
2101 }
2102 goto done;
2103
2104 struct32:
2105 /*
2106 * 32-bit structure processing
2107 */
2108 switch (cmd) {
2109 case DIOCRCLRTABLES:
2110 if (io32->pfrio_esize != 0) {
2111 error = ENODEV;
2112 break;
2113 }
2114 pfr_table_copyin_cleanup(&io32->pfrio_table);
2115 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2116 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2117 break;
2118
2119 case DIOCRADDTABLES:
2120 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2121 error = ENODEV;
2122 break;
2123 }
2124 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2125 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2126 break;
2127
2128 case DIOCRDELTABLES:
2129 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2130 error = ENODEV;
2131 break;
2132 }
2133 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2134 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2135 break;
2136
2137 case DIOCRGETTABLES:
2138 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2139 error = ENODEV;
2140 break;
2141 }
2142 pfr_table_copyin_cleanup(&io32->pfrio_table);
2143 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2144 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2145 break;
2146
2147 case DIOCRGETTSTATS:
2148 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2149 error = ENODEV;
2150 break;
2151 }
2152 pfr_table_copyin_cleanup(&io32->pfrio_table);
2153 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2154 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2155 break;
2156
2157 case DIOCRCLRTSTATS:
2158 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2159 error = ENODEV;
2160 break;
2161 }
2162 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2163 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2164 break;
2165
2166 case DIOCRSETTFLAGS:
2167 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2168 error = ENODEV;
2169 break;
2170 }
2171 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2172 io32->pfrio_setflag, io32->pfrio_clrflag,
2173 &io32->pfrio_nchange, &io32->pfrio_ndel,
2174 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2175 break;
2176
2177 case DIOCRCLRADDRS:
2178 if (io32->pfrio_esize != 0) {
2179 error = ENODEV;
2180 break;
2181 }
2182 pfr_table_copyin_cleanup(&io32->pfrio_table);
2183 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2184 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2185 break;
2186
2187 case DIOCRADDADDRS:
2188 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2189 error = ENODEV;
2190 break;
2191 }
2192 pfr_table_copyin_cleanup(&io32->pfrio_table);
2193 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2194 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2195 PFR_FLAG_USERIOCTL);
2196 break;
2197
2198 case DIOCRDELADDRS:
2199 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2200 error = ENODEV;
2201 break;
2202 }
2203 pfr_table_copyin_cleanup(&io32->pfrio_table);
2204 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2205 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2206 PFR_FLAG_USERIOCTL);
2207 break;
2208
2209 case DIOCRSETADDRS:
2210 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2211 error = ENODEV;
2212 break;
2213 }
2214 pfr_table_copyin_cleanup(&io32->pfrio_table);
2215 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2216 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2217 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2218 PFR_FLAG_USERIOCTL, 0);
2219 break;
2220
2221 case DIOCRGETADDRS:
2222 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2223 error = ENODEV;
2224 break;
2225 }
2226 pfr_table_copyin_cleanup(&io32->pfrio_table);
2227 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2228 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2229 break;
2230
2231 case DIOCRGETASTATS:
2232 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2233 error = ENODEV;
2234 break;
2235 }
2236 pfr_table_copyin_cleanup(&io32->pfrio_table);
2237 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2238 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2239 break;
2240
2241 case DIOCRCLRASTATS:
2242 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2243 error = ENODEV;
2244 break;
2245 }
2246 pfr_table_copyin_cleanup(&io32->pfrio_table);
2247 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2248 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2249 PFR_FLAG_USERIOCTL);
2250 break;
2251
2252 case DIOCRTSTADDRS:
2253 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2254 error = ENODEV;
2255 break;
2256 }
2257 pfr_table_copyin_cleanup(&io32->pfrio_table);
2258 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2259 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2260 PFR_FLAG_USERIOCTL);
2261 break;
2262
2263 case DIOCRINADEFINE:
2264 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2265 error = ENODEV;
2266 break;
2267 }
2268 pfr_table_copyin_cleanup(&io32->pfrio_table);
2269 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2270 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2271 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2272 break;
2273
2274 default:
2275 VERIFY(0);
2276 /* NOTREACHED */
2277 }
2278
2279 done:
2280 return error;
2281 }
2282
2283 static int
2284 pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
2285 struct pfioc_tokens_64 *tok64, struct proc *p)
2286 {
2287 struct pfioc_token *tokens;
2288 struct pfioc_kernel_token *entry, *tmp;
2289 user_addr_t token_buf;
2290 int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
2291 char *ptr;
2292
2293 switch (cmd) {
2294 case DIOCGETSTARTERS: {
2295 int size;
2296
2297 if (nr_tokens == 0) {
2298 error = ENOENT;
2299 break;
2300 }
2301
2302 size = sizeof(struct pfioc_token) * nr_tokens;
2303 if (size / nr_tokens != sizeof(struct pfioc_token)) {
2304 os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
2305 error = ERANGE;
2306 break;
2307 }
2308 ocnt = cnt = (p64 ? tok64->size : tok32->size);
2309 if (cnt == 0) {
2310 if (p64) {
2311 tok64->size = size;
2312 } else {
2313 tok32->size = size;
2314 }
2315 break;
2316 }
2317
2318 token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
2319 tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
2320 if (tokens == NULL) {
2321 error = ENOMEM;
2322 break;
2323 }
2324
2325 ptr = (void *)tokens;
2326 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
2327 struct pfioc_token *t;
2328
2329 if ((unsigned)cnt < sizeof(*tokens)) {
2330 break; /* no more buffer space left */
2331 }
2332 t = (struct pfioc_token *)(void *)ptr;
2333 t->token_value = entry->token.token_value;
2334 t->timestamp = entry->token.timestamp;
2335 t->pid = entry->token.pid;
2336 bcopy(entry->token.proc_name, t->proc_name,
2337 PFTOK_PROCNAME_LEN);
2338 ptr += sizeof(struct pfioc_token);
2339
2340 cnt -= sizeof(struct pfioc_token);
2341 }
2342
2343 if (cnt < ocnt) {
2344 error = copyout(tokens, token_buf, ocnt - cnt);
2345 }
2346
2347 if (p64) {
2348 tok64->size = ocnt - cnt;
2349 } else {
2350 tok32->size = ocnt - cnt;
2351 }
2352
2353 _FREE(tokens, M_TEMP);
2354 break;
2355 }
2356
2357 default:
2358 VERIFY(0);
2359 /* NOTREACHED */
2360 }
2361
2362 return error;
2363 }
2364
/*
 * Expire every state and source-tracking node created by "rule".
 * States belonging to the rule are marked PFTM_PURGE and reaped
 * immediately via pf_purge_expired_states(); any src node owned by the
 * rule is first detached from the states that reference it, then
 * flagged as expired and reclaimed by pf_purge_expired_src_nodes().
 */
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
    struct pf_state *state;
    struct pf_src_node *sn;
    int killed = 0;

    /* expire the states */
    state = TAILQ_FIRST(&state_list);
    while (state) {
        if (state->rule.ptr == rule) {
            /* force immediate timeout on the purge pass below */
            state->timeout = PFTM_PURGE;
        }
        state = TAILQ_NEXT(state, entry_list);
    }
    pf_purge_expired_states(pf_status.states);

    /* expire the src_nodes */
    RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
        if (sn->rule.ptr != rule) {
            continue;
        }
        if (sn->states != 0) {
            /* detach every state still pointing at this node */
            RB_FOREACH(state, pf_state_tree_id,
                &tree_id) {
                if (state->src_node == sn) {
                    state->src_node = NULL;
                }
                if (state->nat_src_node == sn) {
                    state->nat_src_node = NULL;
                }
            }
            sn->states = 0;
        }
        /* expire time of 1 is far in the past, so the purge reclaims it */
        sn->expire = 1;
        killed++;
    }
    if (killed) {
        pf_purge_expired_src_nodes();
    }
}
2406
/*
 * Remove "rule" from the active queue at ruleset index rs_num.  All
 * states/src nodes created by the rule are expired first, then the
 * remaining rules are renumbered so their nr fields stay dense and
 * in queue order.  Caller is responsible for skip-step/ticket
 * maintenance (see pf_ruleset_cleanup()).
 */
static void
pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
    struct pf_rule *rule)
{
    struct pf_rule *r;
    int nr = 0;

    pf_expire_states_and_src_nodes(rule);

    pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
    /* post-decrement: panics only if rcount was already 0 (underflow) */
    if (ruleset->rules[rs_num].active.rcount-- == 0) {
        panic("%s: rcount value broken!", __func__);
    }
    r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);

    /* renumber the surviving rules */
    while (r) {
        r->nr = nr++;
        r = TAILQ_NEXT(r, entries);
    }
}
2427
2428
2429 static void
2430 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2431 {
2432 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2433 ruleset->rules[rs].active.ticket =
2434 ++ruleset->rules[rs].inactive.ticket;
2435 }
2436
2437 /*
2438 * req_dev encodes the PF interface. Currently, possible values are
2439 * 0 or PFRULE_PFM
2440 */
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
/*
 * Delete the rule whose ticket matches pr->rule.ticket from the
 * ruleset named by pr->anchor.  The request must come from the same
 * PF device (req_dev) and owner that installed the rule.  When the
 * deleted rule was the last one inside an owner-less nested anchor,
 * the now-empty anchor rule in the parent ruleset is deleted too,
 * repeating up the anchor chain (the delete_rule: loop).
 * Returns 0, ENOENT, EACCES, or the error from ruleset lookup.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule = NULL;
    int is_anchor;
    int error;
    int i;

    is_anchor = (pr->anchor_call[0] != '\0');
    if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
        pr->rule.owner, is_anchor, &error)) == NULL) {
        return error;
    }

    /* scan every ruleset type for the matching ticket */
    for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
        rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
        while (rule && (rule->ticket != pr->rule.ticket)) {
            rule = TAILQ_NEXT(rule, entries);
        }
    }
    if (rule == NULL) {
        return ENOENT;
    } else {
        /* the for loop increments i once past the hit; undo that */
        i--;
    }

    if (strcmp(rule->owner, pr->rule.owner)) {
        return EACCES;
    }

delete_rule:
    if (rule->anchor && (ruleset != &pf_main_ruleset) &&
        ((strcmp(ruleset->anchor->owner, "")) == 0) &&
        ((ruleset->rules[i].active.rcount - 1) == 0)) {
        /* set rule & ruleset to parent and repeat */
        struct pf_rule *delete_rule = rule;
        struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset ruleset->anchor->parent->ruleset
        if (ruleset->anchor->parent == NULL) {
            ruleset = &pf_main_ruleset;
        } else {
            ruleset = &parent_ruleset;
        }

        /* find the anchor rule in the parent that refers to this anchor */
        rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
        while (rule &&
            (rule->anchor != delete_ruleset->anchor)) {
            rule = TAILQ_NEXT(rule, entries);
        }
        if (rule == NULL) {
            panic("%s: rule not found!", __func__);
        }

        /*
         * if request device != rule's device, bail :
         * with error if ticket matches;
         * without error if ticket doesn't match (i.e. its just cleanup)
         */
        if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
            if (rule->ticket != pr->rule.ticket) {
                return 0;
            } else {
                return EACCES;
            }
        }

        if (delete_rule->rule_flag & PFRULE_PFM) {
            pffwrules--;
        }

        pf_delete_rule_from_ruleset(delete_ruleset,
            i, delete_rule);
        delete_ruleset->rules[i].active.ticket =
            ++delete_ruleset->rules[i].inactive.ticket;
        /* the parent's anchor rule is now the deletion candidate */
        goto delete_rule;
    } else {
        /*
         * process deleting rule only if device that added the
         * rule matches device that issued the request
         */
        if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
            return EACCES;
        }
        if (rule->rule_flag & PFRULE_PFM) {
            pffwrules--;
        }
        pf_delete_rule_from_ruleset(ruleset, i,
            rule);
        pf_ruleset_cleanup(ruleset, i);
    }

    return 0;
}
2536
2537 /*
2538 * req_dev encodes the PF interface. Currently, possible values are
2539 * 0 or PFRULE_PFM
2540 */
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM
 */
/*
 * Delete every rule belonging to "owner" across the main ruleset and
 * all reachable anchors, restricted to rules installed through the PF
 * device encoded in req_dev.  Anchors are traversed iteratively: the
 * walk steps into a non-empty anchor's queue and steps back out via
 * pf_deleterule_anchor_step_out() when the queue is exhausted.  Each
 * time a queue that saw deletions is left, pf_ruleset_cleanup()
 * refreshes its skip steps and tickets.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
    struct pf_ruleset *ruleset;
    struct pf_rule *rule, *next;
    int deleted = 0;

    for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
        rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
        ruleset = &pf_main_ruleset;
        while (rule) {
            next = TAILQ_NEXT(rule, entries);
            /*
             * process deleting rule only if device that added the
             * rule matches device that issued the request
             */
            if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
                rule = next;
                continue;
            }
            if (rule->anchor) {
                if (((strcmp(rule->owner, owner)) == 0) ||
                    ((strcmp(rule->owner, "")) == 0)) {
                    if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
                        /* finish bookkeeping before leaving this queue */
                        if (deleted) {
                            pf_ruleset_cleanup(ruleset, rs);
                            deleted = 0;
                        }
                        /* step into anchor */
                        ruleset =
                            &rule->anchor->ruleset;
                        rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
                        continue;
                    } else {
                        /* anchor is empty: delete the anchor rule itself */
                        if (rule->rule_flag &
                            PFRULE_PFM) {
                            pffwrules--;
                        }
                        pf_delete_rule_from_ruleset(ruleset, rs, rule);
                        deleted = 1;
                        rule = next;
                    }
                } else {
                    rule = next;
                }
            } else {
                if (((strcmp(rule->owner, owner)) == 0)) {
                    /* delete rule */
                    if (rule->rule_flag & PFRULE_PFM) {
                        pffwrules--;
                    }
                    pf_delete_rule_from_ruleset(ruleset,
                        rs, rule);
                    deleted = 1;
                }
                rule = next;
            }
            if (rule == NULL) {
                /* end of this queue: clean up, then step out of the anchor */
                if (deleted) {
                    pf_ruleset_cleanup(ruleset, rs);
                    deleted = 0;
                }
                if (ruleset != &pf_main_ruleset) {
                    pf_deleterule_anchor_step_out(&ruleset,
                        rs, &rule);
                }
            }
        }
    }
}
2611
2612 static void
2613 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2614 int rs, struct pf_rule **rule_ptr)
2615 {
2616 struct pf_ruleset *ruleset = *ruleset_ptr;
2617 struct pf_rule *rule = *rule_ptr;
2618
2619 /* step out of anchor */
2620 struct pf_ruleset *rs_copy = ruleset;
2621 ruleset = ruleset->anchor->parent?
2622 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2623
2624 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2625 while (rule && (rule->anchor != rs_copy->anchor)) {
2626 rule = TAILQ_NEXT(rule, entries);
2627 }
2628 if (rule == NULL) {
2629 panic("%s: parent rule of anchor not found!", __func__);
2630 }
2631 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2632 rule = TAILQ_NEXT(rule, entries);
2633 }
2634
2635 *ruleset_ptr = ruleset;
2636 *rule_ptr = rule;
2637 }
2638
2639 static void
2640 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2641 {
2642 VERIFY(aw);
2643 bzero(&aw->p, sizeof aw->p);
2644 }
2645
/*
 * Common validation and initialization for a rule being added or
 * inserted: resolve the interface, tag names, route labels, dynamic
 * and table addresses, and the anchor call; move the staged address
 * pool (pf_pabuf) into the rule; and zero its counters.  On any
 * failure the rule is freed (pool_put()/pf_rm_rule()) and an errno is
 * returned, so callers must not touch the rule after an error.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
    struct pf_pooladdr *apa;
    int error = 0;

    if (rule->ifname[0]) {
        rule->kif = pfi_kif_get(rule->ifname);
        if (rule->kif == NULL) {
            pool_put(&pf_rule_pl, rule);
            return EINVAL;
        }
        pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
    }
    if (rule->tagname[0]) {
        if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
            error = EBUSY;
        }
    }
    if (rule->match_tagname[0]) {
        if ((rule->match_tag =
            pf_tagname2tag(rule->match_tagname)) == 0) {
            error = EBUSY;
        }
    }
    /* routing options require an explicit direction */
    if (rule->rt && !rule->direction) {
        error = EINVAL;
    }
#if PFLOG
    if (!rule->log) {
        rule->logif = 0;
    }
    if (rule->logif >= PFLOGIFS_MAX) {
        error = EINVAL;
    }
#endif /* PFLOG */
    pf_addrwrap_setup(&rule->src.addr);
    pf_addrwrap_setup(&rule->dst.addr);
    if (pf_rtlabel_add(&rule->src.addr) ||
        pf_rtlabel_add(&rule->dst.addr)) {
        error = EBUSY;
    }
    if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
        error = EINVAL;
    }
    if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
        error = EINVAL;
    }
    if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
        error = EINVAL;
    }
    if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
        error = EINVAL;
    }
    if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
        error = EINVAL;
    }
    /* resolve table references in every staged pool address */
    TAILQ_FOREACH(apa, &pf_pabuf, entries)
    if (pf_tbladdr_setup(ruleset, &apa->addr)) {
        error = EINVAL;
    }

    if (rule->overload_tblname[0]) {
        if ((rule->overload_tbl = pfr_attach_table(ruleset,
            rule->overload_tblname)) == NULL) {
            error = EINVAL;
        } else {
            rule->overload_tbl->pfrkt_flags |=
                PFR_TFLAG_ACTIVE;
        }
    }

    pf_mv_pool(&pf_pabuf, &rule->rpool.list);

    /* translation rules and non-fastroute routing require a pool */
    if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
        (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
        rule->anchor == NULL) ||
        (rule->rt > PF_FASTROUTE)) &&
        (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
        error = EINVAL;
    }

    if (error) {
        pf_rm_rule(NULL, rule);
        return error;
    }
    /* For a NAT64 rule the rule's address family is AF_INET6 whereas
     * the address pool's family will be AF_INET
     */
    rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
    rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
    rule->evaluations = rule->packets[0] = rule->packets[1] =
        rule->bytes[0] = rule->bytes[1] = 0;

    return 0;
}
2743
2744 static int
2745 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2746 {
2747 int error = 0;
2748 u_int32_t req_dev = 0;
2749
2750 switch (cmd) {
2751 case DIOCADDRULE: {
2752 struct pf_ruleset *ruleset;
2753 struct pf_rule *rule, *tail;
2754 int rs_num;
2755
2756 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2757 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2758 ruleset = pf_find_ruleset(pr->anchor);
2759 if (ruleset == NULL) {
2760 error = EINVAL;
2761 break;
2762 }
2763 rs_num = pf_get_ruleset_number(pr->rule.action);
2764 if (rs_num >= PF_RULESET_MAX) {
2765 error = EINVAL;
2766 break;
2767 }
2768 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2769 error = EINVAL;
2770 break;
2771 }
2772 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2773 error = EBUSY;
2774 break;
2775 }
2776 if (pr->pool_ticket != ticket_pabuf) {
2777 error = EBUSY;
2778 break;
2779 }
2780 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2781 if (rule == NULL) {
2782 error = ENOMEM;
2783 break;
2784 }
2785 pf_rule_copyin(&pr->rule, rule, p, minordev);
2786 #if !INET
2787 if (rule->af == AF_INET) {
2788 pool_put(&pf_rule_pl, rule);
2789 error = EAFNOSUPPORT;
2790 break;
2791 }
2792 #endif /* INET */
2793 #if !INET6
2794 if (rule->af == AF_INET6) {
2795 pool_put(&pf_rule_pl, rule);
2796 error = EAFNOSUPPORT;
2797 break;
2798 }
2799 #endif /* INET6 */
2800 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2801 pf_rulequeue);
2802 if (tail) {
2803 rule->nr = tail->nr + 1;
2804 } else {
2805 rule->nr = 0;
2806 }
2807
2808 if ((error = pf_rule_setup(pr, rule, ruleset))) {
2809 break;
2810 }
2811
2812 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2813 rule, entries);
2814 ruleset->rules[rs_num].inactive.rcount++;
2815 if (rule->rule_flag & PFRULE_PFM) {
2816 pffwrules++;
2817 }
2818
2819 if (rule->action == PF_NAT64) {
2820 atomic_add_16(&pf_nat64_configured, 1);
2821 }
2822
2823 if (pr->anchor_call[0] == '\0') {
2824 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2825 if (rule->rule_flag & PFRULE_PFM) {
2826 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2827 }
2828 }
2829
2830 #if DUMMYNET
2831 if (rule->action == PF_DUMMYNET) {
2832 struct dummynet_event dn_event;
2833 uint32_t direction = DN_INOUT;;
2834 bzero(&dn_event, sizeof(dn_event));
2835
2836 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2837
2838 if (rule->direction == PF_IN) {
2839 direction = DN_IN;
2840 } else if (rule->direction == PF_OUT) {
2841 direction = DN_OUT;
2842 }
2843
2844 dn_event.dn_event_rule_config.dir = direction;
2845 dn_event.dn_event_rule_config.af = rule->af;
2846 dn_event.dn_event_rule_config.proto = rule->proto;
2847 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2848 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2849 strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2850 sizeof(dn_event.dn_event_rule_config.ifname));
2851
2852 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2853 }
2854 #endif
2855 break;
2856 }
2857
2858 case DIOCGETRULES: {
2859 struct pf_ruleset *ruleset;
2860 struct pf_rule *tail;
2861 int rs_num;
2862
2863 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2864 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2865 ruleset = pf_find_ruleset(pr->anchor);
2866 if (ruleset == NULL) {
2867 error = EINVAL;
2868 break;
2869 }
2870 rs_num = pf_get_ruleset_number(pr->rule.action);
2871 if (rs_num >= PF_RULESET_MAX) {
2872 error = EINVAL;
2873 break;
2874 }
2875 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2876 pf_rulequeue);
2877 if (tail) {
2878 pr->nr = tail->nr + 1;
2879 } else {
2880 pr->nr = 0;
2881 }
2882 pr->ticket = ruleset->rules[rs_num].active.ticket;
2883 break;
2884 }
2885
2886 case DIOCGETRULE: {
2887 struct pf_ruleset *ruleset;
2888 struct pf_rule *rule;
2889 int rs_num, i;
2890
2891 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2892 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2893 ruleset = pf_find_ruleset(pr->anchor);
2894 if (ruleset == NULL) {
2895 error = EINVAL;
2896 break;
2897 }
2898 rs_num = pf_get_ruleset_number(pr->rule.action);
2899 if (rs_num >= PF_RULESET_MAX) {
2900 error = EINVAL;
2901 break;
2902 }
2903 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2904 error = EBUSY;
2905 break;
2906 }
2907 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2908 while ((rule != NULL) && (rule->nr != pr->nr)) {
2909 rule = TAILQ_NEXT(rule, entries);
2910 }
2911 if (rule == NULL) {
2912 error = EBUSY;
2913 break;
2914 }
2915 pf_rule_copyout(rule, &pr->rule);
2916 if (pf_anchor_copyout(ruleset, rule, pr)) {
2917 error = EBUSY;
2918 break;
2919 }
2920 pfi_dynaddr_copyout(&pr->rule.src.addr);
2921 pfi_dynaddr_copyout(&pr->rule.dst.addr);
2922 pf_tbladdr_copyout(&pr->rule.src.addr);
2923 pf_tbladdr_copyout(&pr->rule.dst.addr);
2924 pf_rtlabel_copyout(&pr->rule.src.addr);
2925 pf_rtlabel_copyout(&pr->rule.dst.addr);
2926 for (i = 0; i < PF_SKIP_COUNT; ++i) {
2927 if (rule->skip[i].ptr == NULL) {
2928 pr->rule.skip[i].nr = -1;
2929 } else {
2930 pr->rule.skip[i].nr =
2931 rule->skip[i].ptr->nr;
2932 }
2933 }
2934
2935 if (pr->action == PF_GET_CLR_CNTR) {
2936 rule->evaluations = 0;
2937 rule->packets[0] = rule->packets[1] = 0;
2938 rule->bytes[0] = rule->bytes[1] = 0;
2939 }
2940 break;
2941 }
2942
2943 case DIOCCHANGERULE: {
2944 struct pfioc_rule *pcr = pr;
2945 struct pf_ruleset *ruleset;
2946 struct pf_rule *oldrule = NULL, *newrule = NULL;
2947 struct pf_pooladdr *pa;
2948 u_int32_t nr = 0;
2949 int rs_num;
2950
2951 if (!(pcr->action == PF_CHANGE_REMOVE ||
2952 pcr->action == PF_CHANGE_GET_TICKET) &&
2953 pcr->pool_ticket != ticket_pabuf) {
2954 error = EBUSY;
2955 break;
2956 }
2957
2958 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2959 pcr->action > PF_CHANGE_GET_TICKET) {
2960 error = EINVAL;
2961 break;
2962 }
2963 pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2964 pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
2965 ruleset = pf_find_ruleset(pcr->anchor);
2966 if (ruleset == NULL) {
2967 error = EINVAL;
2968 break;
2969 }
2970 rs_num = pf_get_ruleset_number(pcr->rule.action);
2971 if (rs_num >= PF_RULESET_MAX) {
2972 error = EINVAL;
2973 break;
2974 }
2975
2976 if (pcr->action == PF_CHANGE_GET_TICKET) {
2977 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2978 break;
2979 } else {
2980 if (pcr->ticket !=
2981 ruleset->rules[rs_num].active.ticket) {
2982 error = EINVAL;
2983 break;
2984 }
2985 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2986 error = EINVAL;
2987 break;
2988 }
2989 }
2990
2991 if (pcr->action != PF_CHANGE_REMOVE) {
2992 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
2993 if (newrule == NULL) {
2994 error = ENOMEM;
2995 break;
2996 }
2997 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
2998 #if !INET
2999 if (newrule->af == AF_INET) {
3000 pool_put(&pf_rule_pl, newrule);
3001 error = EAFNOSUPPORT;
3002 break;
3003 }
3004 #endif /* INET */
3005 #if !INET6
3006 if (newrule->af == AF_INET6) {
3007 pool_put(&pf_rule_pl, newrule);
3008 error = EAFNOSUPPORT;
3009 break;
3010 }
3011 #endif /* INET6 */
3012 if (newrule->ifname[0]) {
3013 newrule->kif = pfi_kif_get(newrule->ifname);
3014 if (newrule->kif == NULL) {
3015 pool_put(&pf_rule_pl, newrule);
3016 error = EINVAL;
3017 break;
3018 }
3019 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3020 } else {
3021 newrule->kif = NULL;
3022 }
3023
3024 if (newrule->tagname[0]) {
3025 if ((newrule->tag =
3026 pf_tagname2tag(newrule->tagname)) == 0) {
3027 error = EBUSY;
3028 }
3029 }
3030 if (newrule->match_tagname[0]) {
3031 if ((newrule->match_tag = pf_tagname2tag(
3032 newrule->match_tagname)) == 0) {
3033 error = EBUSY;
3034 }
3035 }
3036 if (newrule->rt && !newrule->direction) {
3037 error = EINVAL;
3038 }
3039 #if PFLOG
3040 if (!newrule->log) {
3041 newrule->logif = 0;
3042 }
3043 if (newrule->logif >= PFLOGIFS_MAX) {
3044 error = EINVAL;
3045 }
3046 #endif /* PFLOG */
3047 pf_addrwrap_setup(&newrule->src.addr);
3048 pf_addrwrap_setup(&newrule->dst.addr);
3049 if (pf_rtlabel_add(&newrule->src.addr) ||
3050 pf_rtlabel_add(&newrule->dst.addr)) {
3051 error = EBUSY;
3052 }
3053 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3054 error = EINVAL;
3055 }
3056 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3057 error = EINVAL;
3058 }
3059 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3060 error = EINVAL;
3061 }
3062 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3063 error = EINVAL;
3064 }
3065 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3066 error = EINVAL;
3067 }
3068 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3069 if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3070 error = EINVAL;
3071 }
3072
3073 if (newrule->overload_tblname[0]) {
3074 if ((newrule->overload_tbl = pfr_attach_table(
3075 ruleset, newrule->overload_tblname)) ==
3076 NULL) {
3077 error = EINVAL;
3078 } else {
3079 newrule->overload_tbl->pfrkt_flags |=
3080 PFR_TFLAG_ACTIVE;
3081 }
3082 }
3083
3084 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3085 if (((((newrule->action == PF_NAT) ||
3086 (newrule->action == PF_RDR) ||
3087 (newrule->action == PF_BINAT) ||
3088 (newrule->rt > PF_FASTROUTE)) &&
3089 !newrule->anchor)) &&
3090 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3091 error = EINVAL;
3092 }
3093
3094 if (error) {
3095 pf_rm_rule(NULL, newrule);
3096 break;
3097 }
3098 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3099 newrule->evaluations = 0;
3100 newrule->packets[0] = newrule->packets[1] = 0;
3101 newrule->bytes[0] = newrule->bytes[1] = 0;
3102 }
3103 pf_empty_pool(&pf_pabuf);
3104
3105 if (pcr->action == PF_CHANGE_ADD_HEAD) {
3106 oldrule = TAILQ_FIRST(
3107 ruleset->rules[rs_num].active.ptr);
3108 } else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3109 oldrule = TAILQ_LAST(
3110 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3111 } else {
3112 oldrule = TAILQ_FIRST(
3113 ruleset->rules[rs_num].active.ptr);
3114 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3115 oldrule = TAILQ_NEXT(oldrule, entries);
3116 }
3117 if (oldrule == NULL) {
3118 if (newrule != NULL) {
3119 pf_rm_rule(NULL, newrule);
3120 }
3121 error = EINVAL;
3122 break;
3123 }
3124 }
3125
3126 if (pcr->action == PF_CHANGE_REMOVE) {
3127 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3128 ruleset->rules[rs_num].active.rcount--;
3129 } else {
3130 if (oldrule == NULL) {
3131 TAILQ_INSERT_TAIL(
3132 ruleset->rules[rs_num].active.ptr,
3133 newrule, entries);
3134 } else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3135 pcr->action == PF_CHANGE_ADD_BEFORE) {
3136 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3137 } else {
3138 TAILQ_INSERT_AFTER(
3139 ruleset->rules[rs_num].active.ptr,
3140 oldrule, newrule, entries);
3141 }
3142 ruleset->rules[rs_num].active.rcount++;
3143 }
3144
3145 nr = 0;
3146 TAILQ_FOREACH(oldrule,
3147 ruleset->rules[rs_num].active.ptr, entries)
3148 oldrule->nr = nr++;
3149
3150 ruleset->rules[rs_num].active.ticket++;
3151
3152 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3153 pf_remove_if_empty_ruleset(ruleset);
3154
3155 break;
3156 }
3157
3158 case DIOCINSERTRULE: {
3159 struct pf_ruleset *ruleset;
3160 struct pf_rule *rule, *tail, *r;
3161 int rs_num;
3162 int is_anchor;
3163
3164 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3165 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3166 is_anchor = (pr->anchor_call[0] != '\0');
3167
3168 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3169 pr->rule.owner, is_anchor, &error)) == NULL) {
3170 break;
3171 }
3172
3173 rs_num = pf_get_ruleset_number(pr->rule.action);
3174 if (rs_num >= PF_RULESET_MAX) {
3175 error = EINVAL;
3176 break;
3177 }
3178 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3179 error = EINVAL;
3180 break;
3181 }
3182
3183 /* make sure this anchor rule doesn't exist already */
3184 if (is_anchor) {
3185 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3186 while (r) {
3187 if (r->anchor &&
3188 ((strcmp(r->anchor->name,
3189 pr->anchor_call)) == 0)) {
3190 if (((strcmp(pr->rule.owner,
3191 r->owner)) == 0) ||
3192 ((strcmp(r->owner, "")) == 0)) {
3193 error = EEXIST;
3194 } else {
3195 error = EPERM;
3196 }
3197 break;
3198 }
3199 r = TAILQ_NEXT(r, entries);
3200 }
3201 if (error != 0) {
3202 return error;
3203 }
3204 }
3205
3206 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3207 if (rule == NULL) {
3208 error = ENOMEM;
3209 break;
3210 }
3211 pf_rule_copyin(&pr->rule, rule, p, minordev);
3212 #if !INET
3213 if (rule->af == AF_INET) {
3214 pool_put(&pf_rule_pl, rule);
3215 error = EAFNOSUPPORT;
3216 break;
3217 }
3218 #endif /* INET */
3219 #if !INET6
3220 if (rule->af == AF_INET6) {
3221 pool_put(&pf_rule_pl, rule);
3222 error = EAFNOSUPPORT;
3223 break;
3224 }
3225
3226 #endif /* INET6 */
3227 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3228 while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3229 r = TAILQ_NEXT(r, entries);
3230 }
3231 if (r == NULL) {
3232 if ((tail =
3233 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3234 pf_rulequeue)) != NULL) {
3235 rule->nr = tail->nr + 1;
3236 } else {
3237 rule->nr = 0;
3238 }
3239 } else {
3240 rule->nr = r->nr;
3241 }
3242
3243 if ((error = pf_rule_setup(pr, rule, ruleset))) {
3244 break;
3245 }
3246
3247 if (rule->anchor != NULL) {
3248 strlcpy(rule->anchor->owner, rule->owner,
3249 PF_OWNER_NAME_SIZE);
3250 }
3251
3252 if (r) {
3253 TAILQ_INSERT_BEFORE(r, rule, entries);
3254 while (r && ++r->nr) {
3255 r = TAILQ_NEXT(r, entries);
3256 }
3257 } else {
3258 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3259 rule, entries);
3260 }
3261 ruleset->rules[rs_num].active.rcount++;
3262
3263 /* Calculate checksum for the main ruleset */
3264 if (ruleset == &pf_main_ruleset) {
3265 error = pf_setup_pfsync_matching(ruleset);
3266 }
3267
3268 pf_ruleset_cleanup(ruleset, rs_num);
3269 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3270
3271 pr->rule.ticket = rule->ticket;
3272 pf_rule_copyout(rule, &pr->rule);
3273 if (rule->rule_flag & PFRULE_PFM) {
3274 pffwrules++;
3275 }
3276 if (rule->action == PF_NAT64) {
3277 atomic_add_16(&pf_nat64_configured, 1);
3278 }
3279
3280 if (pr->anchor_call[0] == '\0') {
3281 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3282 if (rule->rule_flag & PFRULE_PFM) {
3283 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3284 }
3285 }
3286 break;
3287 }
3288
3289 case DIOCDELETERULE: {
3290 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3291 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3292
3293 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3294 error = EINVAL;
3295 break;
3296 }
3297
3298 /* get device through which request is made */
3299 if ((uint8_t)minordev == PFDEV_PFM) {
3300 req_dev |= PFRULE_PFM;
3301 }
3302
3303 if (pr->rule.ticket) {
3304 if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3305 break;
3306 }
3307 } else {
3308 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3309 }
3310 pr->nr = pffwrules;
3311 if (pr->rule.action == PF_NAT64) {
3312 atomic_add_16(&pf_nat64_configured, -1);
3313 }
3314 break;
3315 }
3316
3317 default:
3318 VERIFY(0);
3319 /* NOTREACHED */
3320 }
3321
3322 return error;
3323 }
3324
/*
 * Handle state-killing ioctls.  DIOCCLRSTATES unlinks every state
 * matching the optional interface-name and owner filters (all states
 * when neither is given); DIOCKILLSTATES additionally filters by
 * address family, protocol, source/destination address masks and
 * ports.  In both cases the number of states killed is returned to
 * the caller in psk->psk_af (field reuse, not an address family).
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
    int error = 0;

    /* NUL-terminate user-supplied filter strings before use */
    psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
    psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

    bool ifname_matched = true;
    bool owner_matched = true;

    switch (cmd) {
    case DIOCCLRSTATES: {
        struct pf_state *s, *nexts;
        int killed = 0;

        /* fetch the successor before s may be unlinked */
        for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
            /*
             * Purge all states only when neither ifname
             * or owner is provided. If any of these are provided
             * we purge only the states with meta data that match
             */
            bool unlink_state = false;
            ifname_matched = true;
            owner_matched = true;

            if (psk->psk_ifname[0] &&
                strcmp(psk->psk_ifname, s->kif->pfik_name)) {
                ifname_matched = false;
            }

            if (psk->psk_ownername[0] &&
                ((NULL == s->rule.ptr) ||
                strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
                owner_matched = false;
            }

            unlink_state = ifname_matched && owner_matched;

            if (unlink_state) {
#if NPFSYNC
                /* don't send out individual delete messages */
                s->sync_flags = PFSTATE_NOSYNC;
#endif
                pf_unlink_state(s);
                killed++;
            }
        }
        /* psk_af doubles as the kill-count return field */
        psk->psk_af = killed;
#if NPFSYNC
        pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
        break;
    }

    case DIOCKILLSTATES: {
        struct pf_state *s, *nexts;
        struct pf_state_key *sk;
        struct pf_state_host *src, *dst;
        int killed = 0;

        for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
            s = nexts) {
            nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
            sk = s->state_key;
            ifname_matched = true;
            owner_matched = true;

            if (psk->psk_ifname[0] &&
                strcmp(psk->psk_ifname, s->kif->pfik_name)) {
                ifname_matched = false;
            }

            if (psk->psk_ownername[0] &&
                ((NULL == s->rule.ptr) ||
                strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
                owner_matched = false;
            }

            /* orient src/dst to the caller's view of the flow */
            if (sk->direction == PF_OUT) {
                src = &sk->lan;
                dst = &sk->ext_lan;
            } else {
                src = &sk->ext_lan;
                dst = &sk->lan;
            }
            /* 0 in af/proto acts as a wildcard */
            if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
                (!psk->psk_proto || psk->psk_proto == sk->proto) &&
                PF_MATCHA(psk->psk_src.neg,
                &psk->psk_src.addr.v.a.addr,
                &psk->psk_src.addr.v.a.mask,
                &src->addr, sk->af_lan) &&
                PF_MATCHA(psk->psk_dst.neg,
                &psk->psk_dst.addr.v.a.addr,
                &psk->psk_dst.addr.v.a.mask,
                &dst->addr, sk->af_lan) &&
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_src.xport,
                &src->xport)) &&
                (pf_match_xport(psk->psk_proto,
                psk->psk_proto_variant, &psk->psk_dst.xport,
                &dst->xport)) &&
                ifname_matched &&
                owner_matched) {
#if NPFSYNC
                /* send immediate delete of state */
                pfsync_delete_state(s);
                s->sync_flags |= PFSTATE_NOSYNC;
#endif
                pf_unlink_state(s);
                killed++;
            }
        }
        psk->psk_af = killed;
        break;
    }

    default:
        VERIFY(0);
        /* NOTREACHED */
    }

    return error;
}
3451
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* Reject timeout indices outside the known timer table. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* pf_alloc_state_key links sk to s on success. */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		/*
		 * NOTE(review): on failure only `s` is returned to its pool
		 * here; presumably pf_insert_state disposes of the state key
		 * itself on collision — verify against its definition.
		 */
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* Look up by (id, creatorid) supplied from userland. */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* Serialize the state back into the user-visible format. */
		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3525
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: { /* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* Size probe: report bytes needed for all states. */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* One bounce buffer, reused for every exported state. */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet purged. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop once the user buffer would overflow. */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Tell userland how many bytes were actually written. */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3597
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Require a protocol and non-zero addresses; TCP/UDP
		 * lookups additionally need both ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* Reversed: user's dst becomes ext_gwy. */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* Reversed: user's dst becomes lan. */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;	/* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated endpoint; the
				 * untranslated side echoes the input.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3686
3687 static int
3688 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3689 {
3690 #pragma unused(p)
3691 int error = 0;
3692
3693 switch (cmd) {
3694 case DIOCSETTIMEOUT: {
3695 int old;
3696
3697 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3698 pt->seconds < 0) {
3699 error = EINVAL;
3700 goto fail;
3701 }
3702 old = pf_default_rule.timeout[pt->timeout];
3703 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3704 pt->seconds = 1;
3705 }
3706 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3707 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3708 wakeup(pf_purge_thread_fn);
3709 }
3710 pt->seconds = old;
3711 break;
3712 }
3713
3714 case DIOCGETTIMEOUT: {
3715 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3716 error = EINVAL;
3717 goto fail;
3718 }
3719 pt->seconds = pf_default_rule.timeout[pt->timeout];
3720 break;
3721 }
3722
3723 default:
3724 VERIFY(0);
3725 /* NOTREACHED */
3726 }
3727 fail:
3728 return error;
3729 }
3730
3731 static int
3732 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3733 {
3734 #pragma unused(p)
3735 int error = 0;
3736
3737 switch (cmd) {
3738 case DIOCGETLIMIT: {
3739 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3740 error = EINVAL;
3741 goto fail;
3742 }
3743 pl->limit = pf_pool_limits[pl->index].limit;
3744 break;
3745 }
3746
3747 case DIOCSETLIMIT: {
3748 int old_limit;
3749
3750 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3751 pf_pool_limits[pl->index].pp == NULL) {
3752 error = EINVAL;
3753 goto fail;
3754 }
3755 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3756 pl->limit, NULL, 0);
3757 old_limit = pf_pool_limits[pl->index].limit;
3758 pf_pool_limits[pl->index].limit = pl->limit;
3759 pl->limit = old_limit;
3760 break;
3761 }
3762
3763 default:
3764 VERIFY(0);
3765 /* NOTREACHED */
3766 }
3767 fail:
3768 return error;
3769 }
3770
3771 static int
3772 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3773 {
3774 #pragma unused(p)
3775 struct pf_pooladdr *pa = NULL;
3776 struct pf_pool *pool = NULL;
3777 int error = 0;
3778
3779 switch (cmd) {
3780 case DIOCBEGINADDRS: {
3781 pf_empty_pool(&pf_pabuf);
3782 pp->ticket = ++ticket_pabuf;
3783 break;
3784 }
3785
3786 case DIOCADDADDR: {
3787 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3788 if (pp->ticket != ticket_pabuf) {
3789 error = EBUSY;
3790 break;
3791 }
3792 #if !INET
3793 if (pp->af == AF_INET) {
3794 error = EAFNOSUPPORT;
3795 break;
3796 }
3797 #endif /* INET */
3798 #if !INET6
3799 if (pp->af == AF_INET6) {
3800 error = EAFNOSUPPORT;
3801 break;
3802 }
3803 #endif /* INET6 */
3804 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3805 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3806 pp->addr.addr.type != PF_ADDR_TABLE) {
3807 error = EINVAL;
3808 break;
3809 }
3810 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3811 if (pa == NULL) {
3812 error = ENOMEM;
3813 break;
3814 }
3815 pf_pooladdr_copyin(&pp->addr, pa);
3816 if (pa->ifname[0]) {
3817 pa->kif = pfi_kif_get(pa->ifname);
3818 if (pa->kif == NULL) {
3819 pool_put(&pf_pooladdr_pl, pa);
3820 error = EINVAL;
3821 break;
3822 }
3823 pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3824 }
3825 pf_addrwrap_setup(&pa->addr);
3826 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3827 pfi_dynaddr_remove(&pa->addr);
3828 pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3829 pool_put(&pf_pooladdr_pl, pa);
3830 error = EINVAL;
3831 break;
3832 }
3833 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3834 break;
3835 }
3836
3837 case DIOCGETADDRS: {
3838 pp->nr = 0;
3839 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3840 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3841 pp->r_num, 0, 1, 0);
3842 if (pool == NULL) {
3843 error = EBUSY;
3844 break;
3845 }
3846 TAILQ_FOREACH(pa, &pool->list, entries)
3847 pp->nr++;
3848 break;
3849 }
3850
3851 case DIOCGETADDR: {
3852 u_int32_t nr = 0;
3853
3854 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3855 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3856 pp->r_num, 0, 1, 1);
3857 if (pool == NULL) {
3858 error = EBUSY;
3859 break;
3860 }
3861 pa = TAILQ_FIRST(&pool->list);
3862 while ((pa != NULL) && (nr < pp->nr)) {
3863 pa = TAILQ_NEXT(pa, entries);
3864 nr++;
3865 }
3866 if (pa == NULL) {
3867 error = EBUSY;
3868 break;
3869 }
3870 pf_pooladdr_copyout(pa, &pp->addr);
3871 pfi_dynaddr_copyout(&pp->addr.addr);
3872 pf_tbladdr_copyout(&pp->addr.addr);
3873 pf_rtlabel_copyout(&pp->addr.addr);
3874 break;
3875 }
3876
3877 case DIOCCHANGEADDR: {
3878 struct pfioc_pooladdr *pca = pp;
3879 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
3880 struct pf_ruleset *ruleset;
3881
3882 if (pca->action < PF_CHANGE_ADD_HEAD ||
3883 pca->action > PF_CHANGE_REMOVE) {
3884 error = EINVAL;
3885 break;
3886 }
3887 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3888 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3889 pca->addr.addr.type != PF_ADDR_TABLE) {
3890 error = EINVAL;
3891 break;
3892 }
3893
3894 pca->anchor[sizeof(pca->anchor) - 1] = '\0';
3895 ruleset = pf_find_ruleset(pca->anchor);
3896 if (ruleset == NULL) {
3897 error = EBUSY;
3898 break;
3899 }
3900 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3901 pca->r_num, pca->r_last, 1, 1);
3902 if (pool == NULL) {
3903 error = EBUSY;
3904 break;
3905 }
3906 if (pca->action != PF_CHANGE_REMOVE) {
3907 newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3908 if (newpa == NULL) {
3909 error = ENOMEM;
3910 break;
3911 }
3912 pf_pooladdr_copyin(&pca->addr, newpa);
3913 #if !INET
3914 if (pca->af == AF_INET) {
3915 pool_put(&pf_pooladdr_pl, newpa);
3916 error = EAFNOSUPPORT;
3917 break;
3918 }
3919 #endif /* INET */
3920 #if !INET6
3921 if (pca->af == AF_INET6) {
3922 pool_put(&pf_pooladdr_pl, newpa);
3923 error = EAFNOSUPPORT;
3924 break;
3925 }
3926 #endif /* INET6 */
3927 if (newpa->ifname[0]) {
3928 newpa->kif = pfi_kif_get(newpa->ifname);
3929 if (newpa->kif == NULL) {
3930 pool_put(&pf_pooladdr_pl, newpa);
3931 error = EINVAL;
3932 break;
3933 }
3934 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3935 } else {
3936 newpa->kif = NULL;
3937 }
3938 pf_addrwrap_setup(&newpa->addr);
3939 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3940 pf_tbladdr_setup(ruleset, &newpa->addr)) {
3941 pfi_dynaddr_remove(&newpa->addr);
3942 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3943 pool_put(&pf_pooladdr_pl, newpa);
3944 error = EINVAL;
3945 break;
3946 }
3947 }
3948
3949 if (pca->action == PF_CHANGE_ADD_HEAD) {
3950 oldpa = TAILQ_FIRST(&pool->list);
3951 } else if (pca->action == PF_CHANGE_ADD_TAIL) {
3952 oldpa = TAILQ_LAST(&pool->list, pf_palist);
3953 } else {
3954 int i = 0;
3955
3956 oldpa = TAILQ_FIRST(&pool->list);
3957 while ((oldpa != NULL) && (i < (int)pca->nr)) {
3958 oldpa = TAILQ_NEXT(oldpa, entries);
3959 i++;
3960 }
3961 if (oldpa == NULL) {
3962 error = EINVAL;
3963 break;
3964 }
3965 }
3966
3967 if (pca->action == PF_CHANGE_REMOVE) {
3968 TAILQ_REMOVE(&pool->list, oldpa, entries);
3969 pfi_dynaddr_remove(&oldpa->addr);
3970 pf_tbladdr_remove(&oldpa->addr);
3971 pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3972 pool_put(&pf_pooladdr_pl, oldpa);
3973 } else {
3974 if (oldpa == NULL) {
3975 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3976 } else if (pca->action == PF_CHANGE_ADD_HEAD ||
3977 pca->action == PF_CHANGE_ADD_BEFORE) {
3978 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3979 } else {
3980 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3981 newpa, entries);
3982 }
3983 }
3984
3985 pool->cur = TAILQ_FIRST(&pool->list);
3986 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
3987 pca->af);
3988 break;
3989 }
3990
3991 default:
3992 VERIFY(0);
3993 /* NOTREACHED */
3994 }
3995
3996 return error;
3997 }
3998
3999 static int
4000 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4001 {
4002 #pragma unused(p)
4003 int error = 0;
4004
4005 switch (cmd) {
4006 case DIOCGETRULESETS: {
4007 struct pf_ruleset *ruleset;
4008 struct pf_anchor *anchor;
4009
4010 pr->path[sizeof(pr->path) - 1] = '\0';
4011 pr->name[sizeof(pr->name) - 1] = '\0';
4012 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4013 error = EINVAL;
4014 break;
4015 }
4016 pr->nr = 0;
4017 if (ruleset->anchor == NULL) {
4018 /* XXX kludge for pf_main_ruleset */
4019 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4020 if (anchor->parent == NULL) {
4021 pr->nr++;
4022 }
4023 } else {
4024 RB_FOREACH(anchor, pf_anchor_node,
4025 &ruleset->anchor->children)
4026 pr->nr++;
4027 }
4028 break;
4029 }
4030
4031 case DIOCGETRULESET: {
4032 struct pf_ruleset *ruleset;
4033 struct pf_anchor *anchor;
4034 u_int32_t nr = 0;
4035
4036 pr->path[sizeof(pr->path) - 1] = '\0';
4037 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4038 error = EINVAL;
4039 break;
4040 }
4041 pr->name[0] = 0;
4042 if (ruleset->anchor == NULL) {
4043 /* XXX kludge for pf_main_ruleset */
4044 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4045 if (anchor->parent == NULL && nr++ == pr->nr) {
4046 strlcpy(pr->name, anchor->name,
4047 sizeof(pr->name));
4048 break;
4049 }
4050 } else {
4051 RB_FOREACH(anchor, pf_anchor_node,
4052 &ruleset->anchor->children)
4053 if (nr++ == pr->nr) {
4054 strlcpy(pr->name, anchor->name,
4055 sizeof(pr->name));
4056 break;
4057 }
4058 }
4059 if (!pr->name[0]) {
4060 error = EBUSY;
4061 }
4062 break;
4063 }
4064
4065 default:
4066 VERIFY(0);
4067 /* NOTREACHED */
4068 }
4069
4070 return error;
4071 }
4072
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;

	/* Pick the 32/64-bit view of the user's transaction array. */
	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* Element size must match our struct exactly. */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* Open an inactive ruleset/table per transaction element. */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ not supported here; silently skip. */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* Return the issued ticket to userland. */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* Discard each element's inactive ruleset/table. */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;	/* saved for the second pass */
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* Ticket must match an open table txn. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				/* Ticket must match the open inactive set. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail; /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4290
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* Size probe: count nodes and report bytes needed. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* One bounce buffer, reused per node. */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* Stop once the user buffer would overflow. */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			/* Work on a copy; rewrite kernel-only fields. */
			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* Convert absolute times to relative seconds. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* Scrub kernel pointers before export. */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* Report the number of bytes actually written. */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4385
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			/* Match both the tracked and translated address. */
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/* Detach every state pointing here. */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn) {
							s->src_node = NULL;
						}
						if (s->nat_src_node == sn) {
							s->nat_src_node = NULL;
						}
					}
					sn->states = 0;
				}
				/* Mark expired; actual removal is below. */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0) {
			pf_purge_expired_src_nodes();
		}

		/* psnk_af doubles as the "nodes killed" count on return. */
		psnk->psnk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4441
4442 static int
4443 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4444 struct pfioc_iface_64 *io64, struct proc *p)
4445 {
4446 int p64 = proc_is64bit(p);
4447 int error = 0;
4448
4449 switch (cmd) {
4450 case DIOCIGETIFACES: {
4451 user_addr_t buf;
4452 int esize;
4453
4454 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4455 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4456
4457 /* esize must be that of the user space version of pfi_kif */
4458 if (esize != sizeof(struct pfi_uif)) {
4459 error = ENODEV;
4460 break;
4461 }
4462 if (p64) {
4463 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4464 } else {
4465 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4466 }
4467 error = pfi_get_ifaces(
4468 p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4469 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4470 break;
4471 }
4472
4473 case DIOCSETIFFLAG: {
4474 if (p64) {
4475 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4476 } else {
4477 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4478 }
4479
4480 error = pfi_set_flags(
4481 p64 ? io64->pfiio_name : io32->pfiio_name,
4482 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4483 break;
4484 }
4485
4486 case DIOCCLRIFFLAG: {
4487 if (p64) {
4488 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4489 } else {
4490 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4491 }
4492
4493 error = pfi_clear_flags(
4494 p64 ? io64->pfiio_name : io32->pfiio_name,
4495 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4496 break;
4497 }
4498
4499 default:
4500 VERIFY(0);
4501 /* NOTREACHED */
4502 }
4503
4504 return error;
4505 }
4506
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * Only take the pf locks on the first (outermost) entry of this
	 * thread into pf; re-entry returns marks == net_thread_marks_none.
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* Detach this packet from any chain; relinked after filtering. */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry caues issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* Packet was consumed; splice around it. */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4599
4600
#if INET
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/* pf expects ip_len/ip_off in network byte order. */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* Dropped by policy: free and report unreachable. */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* pf consumed the mbuf itself. */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* Restore host byte order; pf may have swapped mbufs. */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4655
#if INET6
int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* Dropped by policy: free and report unreachable. */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* pf consumed the mbuf itself. */
			error = ENOBUFS;
		}
	}
	return error;
}
#endif /* INET6 */
4697
4698 int
4699 pf_ifaddr_hook(struct ifnet *ifp)
4700 {
4701 struct pfi_kif *kif = ifp->if_pf_kif;
4702
4703 if (kif != NULL) {
4704 lck_rw_lock_shared(pf_perim_lock);
4705 lck_mtx_lock(pf_lock);
4706
4707 pfi_kifaddr_update(kif);
4708
4709 lck_mtx_unlock(pf_lock);
4710 lck_rw_done(pf_perim_lock);
4711 }
4712 return 0;
4713 }
4714
4715 /*
4716 * Caller acquires dlil lock as writer (exclusive)
4717 */
4718 void
4719 pf_ifnet_hook(struct ifnet *ifp, int attach)
4720 {
4721 lck_rw_lock_shared(pf_perim_lock);
4722 lck_mtx_lock(pf_lock);
4723 if (attach) {
4724 pfi_attach_ifnet(ifp);
4725 } else {
4726 pfi_detach_ifnet(ifp);
4727 }
4728 lck_mtx_unlock(pf_lock);
4729 lck_rw_done(pf_perim_lock);
4730 }
4731
4732 static void
4733 pf_attach_hooks(void)
4734 {
4735 ifnet_head_lock_shared();
4736 /*
4737 * Check against ifnet_addrs[] before proceeding, in case this
4738 * is called very early on, e.g. during dlil_init() before any
4739 * network interface is attached.
4740 */
4741 if (ifnet_addrs != NULL) {
4742 int i;
4743
4744 for (i = 0; i <= if_index; i++) {
4745 struct ifnet *ifp = ifindex2ifnet[i];
4746 if (ifp != NULL) {
4747 pfi_attach_ifnet(ifp);
4748 }
4749 }
4750 }
4751 ifnet_head_done();
4752 }
4753
#if 0
/*
 * currently unused along with pfdetach()
 *
 * Mirror of pf_attach_hooks(): walk all known interfaces and detach
 * from PF those that PF is actually tracking (if_pf_kif != NULL).
 *
 * Fix: the loop index 'i' was declared *inside* the loop body, after
 * being referenced in the for-header — a compile error hidden only by
 * the surrounding #if 0.  Declare it before the loop, matching
 * pf_attach_hooks().
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];

			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4773
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique. This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failures at compile time indicates duplicated ioctl values.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * Duplicate ioctl values produce a "duplicate case value" error.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}