]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pf_ioctl.c
43a8e23c785741883364c4d17a9cd68e2af0d929
[apple/xnu.git] / bsd / net / pf_ioctl.c
1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83
84 #include <mach/vm_param.h>
85
86 #include <net/dlil.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/net_api_stats.h>
90 #include <net/route.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/ip_var.h>
97 #include <netinet/ip_icmp.h>
98 #include <netinet/if_ether.h>
99
100 #if DUMMYNET
101 #include <netinet/ip_dummynet.h>
102 #else
103 struct ip_fw_args;
104 #endif /* DUMMYNET */
105
106 #include <libkern/crypto/md5.h>
107
108 #include <machine/machine_routines.h>
109
110 #include <miscfs/devfs/devfs.h>
111
112 #include <net/pfvar.h>
113
114 #if NPFSYNC
115 #include <net/if_pfsync.h>
116 #endif /* NPFSYNC */
117
118 #if PFLOG
119 #include <net/if_pflog.h>
120 #endif /* PFLOG */
121
122 #if INET6
123 #include <netinet/ip6.h>
124 #include <netinet/in_pcb.h>
125 #endif /* INET6 */
126
127 #include <dev/random/randomdev.h>
128
129 #if 0
130 static void pfdetach(void);
131 #endif
132 static int pfopen(dev_t, int, int, struct proc *);
133 static int pfclose(dev_t, int, int, struct proc *);
134 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
135 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
136 struct pfioc_table_64 *, struct proc *);
137 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
138 struct pfioc_tokens_64 *, struct proc *);
139 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
140 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
141 struct proc *);
142 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
143 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
144 struct pfioc_states_64 *, struct proc *);
145 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
146 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
147 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
148 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
149 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
150 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
151 struct pfioc_trans_64 *, struct proc *);
152 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
153 struct pfioc_src_nodes_64 *, struct proc *);
154 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
155 struct proc *);
156 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
157 struct pfioc_iface_64 *, struct proc *);
158 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
159 u_int8_t, u_int8_t, u_int8_t);
160 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
161 static void pf_empty_pool(struct pf_palist *);
162 static int pf_begin_rules(u_int32_t *, int, const char *);
163 static int pf_rollback_rules(u_int32_t, int, char *);
164 static int pf_setup_pfsync_matching(struct pf_ruleset *);
165 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
166 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
167 static int pf_commit_rules(u_int32_t, int, char *);
168 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
169 int);
170 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
171 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
172 struct pf_state *);
173 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
174 struct pf_state *);
175 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
176 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
177 static void pf_expire_states_and_src_nodes(struct pf_rule *);
178 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
179 int, struct pf_rule *);
180 static void pf_addrwrap_setup(struct pf_addr_wrap *);
181 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
182 struct pf_ruleset *);
183 static void pf_delete_rule_by_owner(char *, u_int32_t);
184 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
185 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
186 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
187 int, struct pf_rule **);
188
189 #define PF_CDEV_MAJOR (-1)
190
191 static struct cdevsw pf_cdevsw = {
192 /* open */ pfopen,
193 /* close */ pfclose,
194 /* read */ eno_rdwrt,
195 /* write */ eno_rdwrt,
196 /* ioctl */ pfioctl,
197 /* stop */ eno_stop,
198 /* reset */ eno_reset,
199 /* tty */ NULL,
200 /* select */ eno_select,
201 /* mmap */ eno_mmap,
202 /* strategy */ eno_strat,
203 /* getc */ eno_getc,
204 /* putc */ eno_putc,
205 /* type */ 0
206 };
207
208 static void pf_attach_hooks(void);
209 #if 0
210 /* currently unused along with pfdetach() */
211 static void pf_detach_hooks(void);
212 #endif
213
214 /*
215 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
216 * and used in pf_af_hook() for performance optimization, such that packets
217 * will enter pf_test() or pf_test6() only when PF is running.
218 */
219 int pf_is_enabled = 0;
220
221 u_int32_t pf_hash_seed;
222 int16_t pf_nat64_configured = 0;
223
224 /*
225 * These are the pf enabled reference counting variables
226 */
227 static u_int64_t pf_enabled_ref_count;
228 static u_int32_t nr_tokens = 0;
229 static u_int64_t pffwrules;
230 static u_int32_t pfdevcnt;
231
232 SLIST_HEAD(list_head, pfioc_kernel_token);
233 static struct list_head token_list_head;
234
235 struct pf_rule pf_default_rule;
236
237 #define TAGID_MAX 50000
238 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
239 TAILQ_HEAD_INITIALIZER(pf_tags);
240
241 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
242 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
243 #endif
244 static u_int16_t tagname2tag(struct pf_tags *, char *);
245 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
246 static void tag_unref(struct pf_tags *, u_int16_t);
247 static int pf_rtlabel_add(struct pf_addr_wrap *);
248 static void pf_rtlabel_remove(struct pf_addr_wrap *);
249 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
250
251 #if INET
252 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
253 struct ip_fw_args *);
254 #endif /* INET */
255 #if INET6
256 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
257 struct ip_fw_args *);
258 #endif /* INET6 */
259
260 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
261
262 /*
263 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
264 */
265 #define PFIOCX_STRUCT_DECL(s) \
266 struct { \
267 union { \
268 struct s##_32 _s##_32; \
269 struct s##_64 _s##_64; \
270 } _u; \
271 } *s##_un = NULL \
272
273 #define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
274 VERIFY(s##_un == NULL); \
275 s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
276 if (s##_un == NULL) { \
277 _action \
278 } else { \
279 if (p64) \
280 bcopy(a, &s##_un->_u._s##_64, \
281 sizeof (struct s##_64)); \
282 else \
283 bcopy(a, &s##_un->_u._s##_32, \
284 sizeof (struct s##_32)); \
285 } \
286 }
287
288 #define PFIOCX_STRUCT_END(s, a) { \
289 VERIFY(s##_un != NULL); \
290 if (p64) \
291 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
292 else \
293 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
294 _FREE(s##_un, M_TEMP); \
295 s##_un = NULL; \
296 }
297
298 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
299 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
300
301 /*
302 * Helper macros for regular ioctl structures.
303 */
304 #define PFIOC_STRUCT_BEGIN(a, v, _action) { \
305 VERIFY((v) == NULL); \
306 (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
307 if ((v) == NULL) { \
308 _action \
309 } else { \
310 bcopy(a, v, sizeof (*(v))); \
311 } \
312 }
313
314 #define PFIOC_STRUCT_END(v, a) { \
315 VERIFY((v) != NULL); \
316 bcopy(v, a, sizeof (*(v))); \
317 _FREE(v, M_TEMP); \
318 (v) = NULL; \
319 }
320
321 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
322 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
323
324 static lck_attr_t *pf_perim_lock_attr;
325 static lck_grp_t *pf_perim_lock_grp;
326 static lck_grp_attr_t *pf_perim_lock_grp_attr;
327
328 static lck_attr_t *pf_lock_attr;
329 static lck_grp_t *pf_lock_grp;
330 static lck_grp_attr_t *pf_lock_grp_attr;
331
332 struct thread *pf_purge_thread;
333
334 extern void pfi_kifaddr_update(void *);
335
336 /* pf enable ref-counting helper functions */
337 static u_int64_t generate_token(struct proc *);
338 static int remove_token(struct pfioc_remove_token *);
339 static void invalidate_all_tokens(void);
340
341 static u_int64_t
342 generate_token(struct proc *p)
343 {
344 u_int64_t token_value;
345 struct pfioc_kernel_token *new_token;
346
347 new_token = _MALLOC(sizeof(struct pfioc_kernel_token), M_TEMP,
348 M_WAITOK | M_ZERO);
349
350 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
351
352 if (new_token == NULL) {
353 /* malloc failed! bail! */
354 printf("%s: unable to allocate pf token structure!", __func__);
355 return 0;
356 }
357
358 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
359
360 new_token->token.token_value = token_value;
361 new_token->token.pid = proc_pid(p);
362 proc_name(new_token->token.pid, new_token->token.proc_name,
363 sizeof(new_token->token.proc_name));
364 new_token->token.timestamp = pf_calendar_time_second();
365
366 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
367 nr_tokens++;
368
369 return token_value;
370 }
371
372 static int
373 remove_token(struct pfioc_remove_token *tok)
374 {
375 struct pfioc_kernel_token *entry, *tmp;
376
377 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
378
379 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
380 if (tok->token_value == entry->token.token_value) {
381 SLIST_REMOVE(&token_list_head, entry,
382 pfioc_kernel_token, next);
383 _FREE(entry, M_TEMP);
384 nr_tokens--;
385 return 0; /* success */
386 }
387 }
388
389 printf("pf : remove failure\n");
390 return ESRCH; /* failure */
391 }
392
393 static void
394 invalidate_all_tokens(void)
395 {
396 struct pfioc_kernel_token *entry, *tmp;
397
398 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
399
400 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
401 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
402 _FREE(entry, M_TEMP);
403 }
404
405 nr_tokens = 0;
406 }
407
408 void
409 pfinit(void)
410 {
411 u_int32_t *t = pf_default_rule.timeout;
412 int maj;
413
414 pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
415 pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
416 pf_perim_lock_grp_attr);
417 pf_perim_lock_attr = lck_attr_alloc_init();
418 lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);
419
420 pf_lock_grp_attr = lck_grp_attr_alloc_init();
421 pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
422 pf_lock_attr = lck_attr_alloc_init();
423 lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);
424
425 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
426 NULL);
427 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
428 "pfsrctrpl", NULL);
429 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
430 NULL);
431 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
432 "pfstatekeypl", NULL);
433 pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
434 "pfappstatepl", NULL);
435 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
436 "pfpooladdrpl", NULL);
437 pfr_initialize();
438 pfi_initialize();
439 pf_osfp_initialize();
440
441 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
442 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
443
444 if (max_mem <= 256 * 1024 * 1024) {
445 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
446 PFR_KENTRY_HIWAT_SMALL;
447 }
448
449 RB_INIT(&tree_src_tracking);
450 RB_INIT(&pf_anchors);
451 pf_init_ruleset(&pf_main_ruleset);
452 TAILQ_INIT(&pf_pabuf);
453 TAILQ_INIT(&state_list);
454
455 _CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
456 _CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
457 _CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
458 _CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
459 _CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
460 _CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
461 _CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
462 _CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
463 _CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
464 _CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
465 _CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);
466
467 /* default rule should never be garbage collected */
468 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
469 pf_default_rule.action = PF_PASS;
470 pf_default_rule.nr = -1;
471 pf_default_rule.rtableid = IFSCOPE_NONE;
472
473 /* initialize default timeouts */
474 t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
475 t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
476 t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
477 t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
478 t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
479 t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
480 t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
481 t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
482 t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
483 t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
484 t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
485 t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
486 t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
487 t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
488 t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
489 t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
490 t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
491 t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
492 t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
493 t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
494 t[PFTM_FRAG] = PFTM_FRAG_VAL;
495 t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
496 t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
497 t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
498 t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
499 t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
500
501 pf_normalize_init();
502 bzero(&pf_status, sizeof(pf_status));
503 pf_status.debug = PF_DEBUG_URGENT;
504 pf_hash_seed = RandomULong();
505
506 /* XXX do our best to avoid a conflict */
507 pf_status.hostid = random();
508
509 if (kernel_thread_start(pf_purge_thread_fn, NULL,
510 &pf_purge_thread) != 0) {
511 printf("%s: unable to start purge thread!", __func__);
512 return;
513 }
514
515 maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
516 if (maj == -1) {
517 printf("%s: failed to allocate major number!\n", __func__);
518 return;
519 }
520 (void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
521 UID_ROOT, GID_WHEEL, 0600, "pf", 0);
522
523 (void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
524 UID_ROOT, GID_WHEEL, 0600, "pfm", 0);
525
526 pf_attach_hooks();
527 #if DUMMYNET
528 dummynet_init();
529 #endif
530 }
531
#if 0
/*
 * pfdetach: tear down the entire PF subsystem.  Currently unused and
 * compiled out (together with pf_detach_hooks()); kept for reference.
 *
 * Detaches the interface hooks, stops the purge machinery, flushes all
 * rulesets, states, source nodes, tables and anchors, then destroys
 * the backing pools and the normalize/osfp/table/interface subsystems.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';	/* empty anchor path => main ruleset */

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets: begin+commit with nothing staged empties them */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark everything purgeable, then purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach them from states, then expire all */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors by flushing each until the tree drains */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
609
610 static int
611 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
612 {
613 #pragma unused(flags, fmt, p)
614 if (minor(dev) >= PFDEV_MAX) {
615 return ENXIO;
616 }
617
618 if (minor(dev) == PFDEV_PFM) {
619 lck_mtx_lock(pf_lock);
620 if (pfdevcnt != 0) {
621 lck_mtx_unlock(pf_lock);
622 return EBUSY;
623 }
624 pfdevcnt++;
625 lck_mtx_unlock(pf_lock);
626 }
627 return 0;
628 }
629
630 static int
631 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
632 {
633 #pragma unused(flags, fmt, p)
634 if (minor(dev) >= PFDEV_MAX) {
635 return ENXIO;
636 }
637
638 if (minor(dev) == PFDEV_PFM) {
639 lck_mtx_lock(pf_lock);
640 VERIFY(pfdevcnt > 0);
641 pfdevcnt--;
642 lck_mtx_unlock(pf_lock);
643 }
644 return 0;
645 }
646
647 static struct pf_pool *
648 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
649 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
650 u_int8_t check_ticket)
651 {
652 struct pf_ruleset *ruleset;
653 struct pf_rule *rule;
654 int rs_num;
655
656 ruleset = pf_find_ruleset(anchor);
657 if (ruleset == NULL) {
658 return NULL;
659 }
660 rs_num = pf_get_ruleset_number(rule_action);
661 if (rs_num >= PF_RULESET_MAX) {
662 return NULL;
663 }
664 if (active) {
665 if (check_ticket && ticket !=
666 ruleset->rules[rs_num].active.ticket) {
667 return NULL;
668 }
669 if (r_last) {
670 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
671 pf_rulequeue);
672 } else {
673 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
674 }
675 } else {
676 if (check_ticket && ticket !=
677 ruleset->rules[rs_num].inactive.ticket) {
678 return NULL;
679 }
680 if (r_last) {
681 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
682 pf_rulequeue);
683 } else {
684 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
685 }
686 }
687 if (!r_last) {
688 while ((rule != NULL) && (rule->nr != rule_number)) {
689 rule = TAILQ_NEXT(rule, entries);
690 }
691 }
692 if (rule == NULL) {
693 return NULL;
694 }
695
696 return &rule->rpool;
697 }
698
699 static void
700 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
701 {
702 struct pf_pooladdr *mv_pool_pa;
703
704 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
705 TAILQ_REMOVE(poola, mv_pool_pa, entries);
706 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
707 }
708 }
709
710 static void
711 pf_empty_pool(struct pf_palist *poola)
712 {
713 struct pf_pooladdr *empty_pool_pa;
714
715 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
716 pfi_dynaddr_remove(&empty_pool_pa->addr);
717 pf_tbladdr_remove(&empty_pool_pa->addr);
718 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
719 TAILQ_REMOVE(poola, empty_pool_pa, entries);
720 pool_put(&pf_pooladdr_pl, empty_pool_pa);
721 }
722 }
723
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and free it once no
 * states or source nodes reference it.
 *
 * When still referenced (rule->states > 0 or rule->src_nodes > 0) the
 * rule is only detached; the final pf_rm_rule(NULL, rule) call from
 * the state/src-node teardown path performs the actual free, which is
 * why the table detach is repeated in the rulequeue == NULL branch.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as already unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer the free while states/src nodes still point at the rule */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* deferred-free path: tables were not detached above */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
767
/*
 * Map `tagname' to a numeric tag id in `head', allocating a new id on
 * first use.
 *
 * If the name already exists its reference count is bumped and the
 * existing id is returned.  Otherwise the list — kept sorted by tag
 * id — is scanned for the first unused id so that ids stay dense.
 * Returns 0 on failure (id space exhausted or allocation failure).
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	/* existing entry: take another reference and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return tag->tag;
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* walk the sorted list; stop at the first gap in the ids */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > TAGID_MAX) {
		return 0;
	}

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO);
	if (tag == NULL) {
		return 0;
	}
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;	/* struct is zeroed (M_ZERO), so ref becomes 1 */

	if (p != NULL) {	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {	/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
815
816 static void
817 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
818 {
819 struct pf_tagname *tag;
820
821 TAILQ_FOREACH(tag, head, entries)
822 if (tag->tag == tagid) {
823 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
824 return;
825 }
826 }
827
828 static void
829 tag_unref(struct pf_tags *head, u_int16_t tag)
830 {
831 struct pf_tagname *p, *next;
832
833 if (tag == 0) {
834 return;
835 }
836
837 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
838 next = TAILQ_NEXT(p, entries);
839 if (tag == p->tag) {
840 if (--p->ref == 0) {
841 TAILQ_REMOVE(head, p, entries);
842 _FREE(p, M_TEMP);
843 }
844 break;
845 }
846 }
847 }
848
/*
 * Public wrapper: resolve (or allocate) a tag id for `tagname' in the
 * global pf_tags list.  Returns 0 on failure.
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
854
/*
 * Public wrapper: copy the name of global tag `tagid' into `p' (at
 * least PF_TAG_NAME_SIZE bytes); `p' is unchanged for unknown ids.
 */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
860
861 void
862 pf_tag_ref(u_int16_t tag)
863 {
864 struct pf_tagname *t;
865
866 TAILQ_FOREACH(t, &pf_tags, entries)
867 if (t->tag == tag) {
868 break;
869 }
870 if (t != NULL) {
871 t->ref++;
872 }
873 }
874
/*
 * Public wrapper: release one reference on global tag id `tag',
 * freeing the entry when the last reference is dropped.
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
880
/*
 * Route-label support is not implemented on this platform; this stub
 * always succeeds and is kept for source compatibility with OpenBSD.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
887
/* Route-label removal stub: no-op on this platform (see pf_rtlabel_add). */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
893
/* Route-label copyout stub: no-op on this platform (see pf_rtlabel_add). */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
899
900 static int
901 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
902 {
903 struct pf_ruleset *rs;
904 struct pf_rule *rule;
905
906 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
907 return EINVAL;
908 }
909 rs = pf_find_or_create_ruleset(anchor);
910 if (rs == NULL) {
911 return EINVAL;
912 }
913 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
914 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
915 rs->rules[rs_num].inactive.rcount--;
916 }
917 *ticket = ++rs->rules[rs_num].inactive.ticket;
918 rs->rules[rs_num].inactive.open = 1;
919 return 0;
920 }
921
922 static int
923 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
924 {
925 struct pf_ruleset *rs;
926 struct pf_rule *rule;
927
928 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
929 return EINVAL;
930 }
931 rs = pf_find_ruleset(anchor);
932 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
933 rs->rules[rs_num].inactive.ticket != ticket) {
934 return 0;
935 }
936 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
937 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
938 rs->rules[rs_num].inactive.rcount--;
939 }
940 rs->rules[rs_num].inactive.open = 0;
941 return 0;
942 }
943
/*
 * Helpers for folding rule fields into the ruleset MD5 checksum
 * (`ctx' is the MD5_CTX in the calling function's scope):
 *   PF_MD5_UPD        - hash a fixed-size struct member
 *   PF_MD5_UPD_STR    - hash a NUL-terminated string member (no NUL)
 *   PF_MD5_UPD_HTONL/S - hash a 32/16-bit member in network byte order,
 *                        using the caller-provided scratch variable
 */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
959
/*
 * Fold the match-relevant parts of a rule address — the address
 * wrapper (by type), the port range for TCP/UDP, and the negation
 * flag — into the ruleset MD5 checksum.
 * NOTE(review): the order of the updates defines the checksum value;
 * do not reorder fields without considering checksum compatibility.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only participate for protocols that carry them */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
996
/*
 * Fold all match-relevant fields of `rule' into the MD5 context used
 * for the ruleset checksum (see pf_setup_pfsync_matching(), called
 * from pf_commit_rules()).  Multi-byte scalars are hashed in network
 * byte order via the x/y scratch variables.
 * NOTE(review): the field order defines the checksum; do not reorder.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for PF_MD5_UPD_HTONS */
	u_int32_t y;	/* scratch for PF_MD5_UPD_HTONL */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1035
/*
 * Commit the rule transaction identified by `ticket': swap the staged
 * inactive ruleset of `anchor'/`rs_num' into the active slot, move the
 * previously active rules to the inactive slot, and purge them.
 *
 * Returns EINVAL for a bad ruleset number, EBUSY when the ticket does
 * not match an open inactive ruleset, or an error propagated from
 * pf_setup_pfsync_matching().  Caller must hold pf_lock (asserted).
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* drop the pffwrules accounting for PFM rules about to be purged */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	/* the old active set now sits in the inactive slot for purging */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	/* recompute skip steps for the freshly activated rule list */
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	if (rs->rules[rs_num].inactive.ptr_array) {
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	}
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1108
1109 static void
1110 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1111 int minordev)
1112 {
1113 bcopy(src, dst, sizeof(struct pf_rule));
1114
1115 dst->label[sizeof(dst->label) - 1] = '\0';
1116 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1117 dst->qname[sizeof(dst->qname) - 1] = '\0';
1118 dst->pqname[sizeof(dst->pqname) - 1] = '\0';
1119 dst->tagname[sizeof(dst->tagname) - 1] = '\0';
1120 dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
1121 dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
1122
1123 dst->cuid = kauth_cred_getuid(p->p_ucred);
1124 dst->cpid = p->p_pid;
1125
1126 dst->anchor = NULL;
1127 dst->kif = NULL;
1128 dst->overload_tbl = NULL;
1129
1130 TAILQ_INIT(&dst->rpool.list);
1131 dst->rpool.cur = NULL;
1132
1133 /* initialize refcounting */
1134 dst->states = 0;
1135 dst->src_nodes = 0;
1136
1137 dst->entries.tqe_prev = NULL;
1138 dst->entries.tqe_next = NULL;
1139 if ((uint8_t)minordev == PFDEV_PFM) {
1140 dst->rule_flag |= PFRULE_PFM;
1141 }
1142 }
1143
1144 static void
1145 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1146 {
1147 bcopy(src, dst, sizeof(struct pf_rule));
1148
1149 dst->anchor = NULL;
1150 dst->kif = NULL;
1151 dst->overload_tbl = NULL;
1152
1153 TAILQ_INIT(&dst->rpool.list);
1154 dst->rpool.cur = NULL;
1155
1156 dst->entries.tqe_prev = NULL;
1157 dst->entries.tqe_next = NULL;
1158 }
1159
/*
 * Flatten a pf state and its state key into the export format
 * (struct pfsync_state) handed out to user space / pfsync.
 * Times are converted from absolute values to relative ones
 * (age since creation, seconds until expiry).
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* (unsigned)-1 marks the absence of an optional rule reference */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	/* export the state's age, not its absolute creation time */
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* convert absolute expiry into seconds remaining (0 if due) */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1220
/*
 * Inverse of pf_state_export(): populate a state key and state from an
 * imported pfsync_state.  Rule references cannot be reconstructed on
 * this host, so the state is attached to the default rule and its
 * counters are reset.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* flowhash is host-local; recompute rather than trust the import */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0) {
		/*
		 * sp->expire carries seconds remaining (see
		 * pf_state_export()); back-date the local expiry base by
		 * the portion of the default timeout already consumed.
		 */
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	/* start with fresh local statistics */
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1261
1262 static void
1263 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1264 {
1265 bcopy(src, dst, sizeof(struct pf_pooladdr));
1266
1267 dst->entries.tqe_prev = NULL;
1268 dst->entries.tqe_next = NULL;
1269 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1270 dst->kif = NULL;
1271 }
1272
1273 static void
1274 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1275 {
1276 bcopy(src, dst, sizeof(struct pf_pooladdr));
1277
1278 dst->entries.tqe_prev = NULL;
1279 dst->entries.tqe_next = NULL;
1280 dst->kif = NULL;
1281 }
1282
/*
 * Walk every (non-scrub) inactive ruleset slot, rebuilding its
 * rule-number -> rule-pointer index array and hashing each rule into
 * one MD5 digest.  The digest is stored in pf_status.pf_chksum and is
 * used for pfsync ruleset matching.  Returns ENOMEM if an index array
 * cannot be allocated; 0 otherwise.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* Discard any stale index array from a prior transaction. */
		if (rs->rules[rs_cnt].inactive.ptr_array) {
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		}
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array) {
				return ENOMEM;
			}
		}

		/*
		 * Index each rule by its number while folding it into the
		 * digest.  Relies on the invariant that rule->nr < rcount
		 * for every rule queued on the inactive list.
		 */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1325
/*
 * Enable pf.  Marks the firewall as running, records the start time,
 * seeds the state-ID generator on first start, and wakes the purge
 * thread.  Caller must hold pf_lock, and pf must currently be
 * disabled (VERIFYed).
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* Seed: clock seconds placed in the upper 32 bits. */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	/* Nudge the purge thread so it begins expiring states. */
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1343
/*
 * Disable pf.  Clears the running flags, records the stop time and
 * wakes the purge thread.  Caller must hold pf_lock, and pf must
 * currently be enabled (VERIFYed).
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1357
1358 static int
1359 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1360 {
1361 #pragma unused(dev)
1362 int p64 = proc_is64bit(p);
1363 int error = 0;
1364 int minordev = minor(dev);
1365
1366 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
1367 return EPERM;
1368 }
1369
1370 /* XXX keep in sync with switch() below */
1371 if (securelevel > 1) {
1372 switch (cmd) {
1373 case DIOCGETRULES:
1374 case DIOCGETRULE:
1375 case DIOCGETADDRS:
1376 case DIOCGETADDR:
1377 case DIOCGETSTATE:
1378 case DIOCSETSTATUSIF:
1379 case DIOCGETSTATUS:
1380 case DIOCCLRSTATUS:
1381 case DIOCNATLOOK:
1382 case DIOCSETDEBUG:
1383 case DIOCGETSTATES:
1384 case DIOCINSERTRULE:
1385 case DIOCDELETERULE:
1386 case DIOCGETTIMEOUT:
1387 case DIOCCLRRULECTRS:
1388 case DIOCGETLIMIT:
1389 case DIOCGETALTQS:
1390 case DIOCGETALTQ:
1391 case DIOCGETQSTATS:
1392 case DIOCGETRULESETS:
1393 case DIOCGETRULESET:
1394 case DIOCRGETTABLES:
1395 case DIOCRGETTSTATS:
1396 case DIOCRCLRTSTATS:
1397 case DIOCRCLRADDRS:
1398 case DIOCRADDADDRS:
1399 case DIOCRDELADDRS:
1400 case DIOCRSETADDRS:
1401 case DIOCRGETADDRS:
1402 case DIOCRGETASTATS:
1403 case DIOCRCLRASTATS:
1404 case DIOCRTSTADDRS:
1405 case DIOCOSFPGET:
1406 case DIOCGETSRCNODES:
1407 case DIOCCLRSRCNODES:
1408 case DIOCIGETIFACES:
1409 case DIOCGIFSPEED:
1410 case DIOCSETIFFLAG:
1411 case DIOCCLRIFFLAG:
1412 break;
1413 case DIOCRCLRTABLES:
1414 case DIOCRADDTABLES:
1415 case DIOCRDELTABLES:
1416 case DIOCRSETTFLAGS: {
1417 int pfrio_flags;
1418
1419 bcopy(&((struct pfioc_table *)(void *)addr)->
1420 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1421
1422 if (pfrio_flags & PFR_FLAG_DUMMY) {
1423 break; /* dummy operation ok */
1424 }
1425 return EPERM;
1426 }
1427 default:
1428 return EPERM;
1429 }
1430 }
1431
1432 if (!(flags & FWRITE)) {
1433 switch (cmd) {
1434 case DIOCSTART:
1435 case DIOCSTARTREF:
1436 case DIOCSTOP:
1437 case DIOCSTOPREF:
1438 case DIOCGETSTARTERS:
1439 case DIOCGETRULES:
1440 case DIOCGETADDRS:
1441 case DIOCGETADDR:
1442 case DIOCGETSTATE:
1443 case DIOCGETSTATUS:
1444 case DIOCGETSTATES:
1445 case DIOCINSERTRULE:
1446 case DIOCDELETERULE:
1447 case DIOCGETTIMEOUT:
1448 case DIOCGETLIMIT:
1449 case DIOCGETALTQS:
1450 case DIOCGETALTQ:
1451 case DIOCGETQSTATS:
1452 case DIOCGETRULESETS:
1453 case DIOCGETRULESET:
1454 case DIOCNATLOOK:
1455 case DIOCRGETTABLES:
1456 case DIOCRGETTSTATS:
1457 case DIOCRGETADDRS:
1458 case DIOCRGETASTATS:
1459 case DIOCRTSTADDRS:
1460 case DIOCOSFPGET:
1461 case DIOCGETSRCNODES:
1462 case DIOCIGETIFACES:
1463 case DIOCGIFSPEED:
1464 break;
1465 case DIOCRCLRTABLES:
1466 case DIOCRADDTABLES:
1467 case DIOCRDELTABLES:
1468 case DIOCRCLRTSTATS:
1469 case DIOCRCLRADDRS:
1470 case DIOCRADDADDRS:
1471 case DIOCRDELADDRS:
1472 case DIOCRSETADDRS:
1473 case DIOCRSETTFLAGS: {
1474 int pfrio_flags;
1475
1476 bcopy(&((struct pfioc_table *)(void *)addr)->
1477 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1478
1479 if (pfrio_flags & PFR_FLAG_DUMMY) {
1480 flags |= FWRITE; /* need write lock for dummy */
1481 break; /* dummy operation ok */
1482 }
1483 return EACCES;
1484 }
1485 case DIOCGETRULE: {
1486 u_int32_t action;
1487
1488 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1489 &action, sizeof(action));
1490
1491 if (action == PF_GET_CLR_CNTR) {
1492 return EACCES;
1493 }
1494 break;
1495 }
1496 default:
1497 return EACCES;
1498 }
1499 }
1500
1501 if (flags & FWRITE) {
1502 lck_rw_lock_exclusive(pf_perim_lock);
1503 } else {
1504 lck_rw_lock_shared(pf_perim_lock);
1505 }
1506
1507 lck_mtx_lock(pf_lock);
1508
1509 switch (cmd) {
1510 case DIOCSTART:
1511 if (pf_status.running) {
1512 /*
1513 * Increment the reference for a simple -e enable, so
1514 * that even if other processes drop their references,
1515 * pf will still be available to processes that turned
1516 * it on without taking a reference
1517 */
1518 if (nr_tokens == pf_enabled_ref_count) {
1519 pf_enabled_ref_count++;
1520 VERIFY(pf_enabled_ref_count != 0);
1521 }
1522 error = EEXIST;
1523 } else if (pf_purge_thread == NULL) {
1524 error = ENOMEM;
1525 } else {
1526 pf_start();
1527 pf_enabled_ref_count++;
1528 VERIFY(pf_enabled_ref_count != 0);
1529 }
1530 break;
1531
1532 case DIOCSTARTREF: /* u_int64_t */
1533 if (pf_purge_thread == NULL) {
1534 error = ENOMEM;
1535 } else {
1536 u_int64_t token;
1537
1538 /* small enough to be on stack */
1539 if ((token = generate_token(p)) != 0) {
1540 if (pf_is_enabled == 0) {
1541 pf_start();
1542 }
1543 pf_enabled_ref_count++;
1544 VERIFY(pf_enabled_ref_count != 0);
1545 } else {
1546 error = ENOMEM;
1547 DPFPRINTF(PF_DEBUG_URGENT,
1548 ("pf: unable to generate token\n"));
1549 }
1550 bcopy(&token, addr, sizeof(token));
1551 }
1552 break;
1553
1554 case DIOCSTOP:
1555 if (!pf_status.running) {
1556 error = ENOENT;
1557 } else {
1558 pf_stop();
1559 pf_enabled_ref_count = 0;
1560 invalidate_all_tokens();
1561 }
1562 break;
1563
1564 case DIOCSTOPREF: /* struct pfioc_remove_token */
1565 if (!pf_status.running) {
1566 error = ENOENT;
1567 } else {
1568 struct pfioc_remove_token pfrt;
1569
1570 /* small enough to be on stack */
1571 bcopy(addr, &pfrt, sizeof(pfrt));
1572 if ((error = remove_token(&pfrt)) == 0) {
1573 VERIFY(pf_enabled_ref_count != 0);
1574 pf_enabled_ref_count--;
1575 /* return currently held references */
1576 pfrt.refcount = pf_enabled_ref_count;
1577 DPFPRINTF(PF_DEBUG_MISC,
1578 ("pf: enabled refcount decremented\n"));
1579 } else {
1580 error = EINVAL;
1581 DPFPRINTF(PF_DEBUG_URGENT,
1582 ("pf: token mismatch\n"));
1583 }
1584 bcopy(&pfrt, addr, sizeof(pfrt));
1585
1586 if (error == 0 && pf_enabled_ref_count == 0) {
1587 pf_stop();
1588 }
1589 }
1590 break;
1591
1592 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1593 PFIOCX_STRUCT_DECL(pfioc_tokens);
1594
1595 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
1596 error = pfioctl_ioc_tokens(cmd,
1597 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1598 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1599 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1600 break;
1601 }
1602
1603 case DIOCADDRULE: /* struct pfioc_rule */
1604 case DIOCGETRULES: /* struct pfioc_rule */
1605 case DIOCGETRULE: /* struct pfioc_rule */
1606 case DIOCCHANGERULE: /* struct pfioc_rule */
1607 case DIOCINSERTRULE: /* struct pfioc_rule */
1608 case DIOCDELETERULE: { /* struct pfioc_rule */
1609 struct pfioc_rule *pr = NULL;
1610
1611 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1612 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1613 PFIOC_STRUCT_END(pr, addr);
1614 break;
1615 }
1616
1617 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1618 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1619 struct pfioc_state_kill *psk = NULL;
1620
1621 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; );
1622 error = pfioctl_ioc_state_kill(cmd, psk, p);
1623 PFIOC_STRUCT_END(psk, addr);
1624 break;
1625 }
1626
1627 case DIOCADDSTATE: /* struct pfioc_state */
1628 case DIOCGETSTATE: { /* struct pfioc_state */
1629 struct pfioc_state *ps = NULL;
1630
1631 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; );
1632 error = pfioctl_ioc_state(cmd, ps, p);
1633 PFIOC_STRUCT_END(ps, addr);
1634 break;
1635 }
1636
1637 case DIOCGETSTATES: { /* struct pfioc_states */
1638 PFIOCX_STRUCT_DECL(pfioc_states);
1639
1640 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; );
1641 error = pfioctl_ioc_states(cmd,
1642 PFIOCX_STRUCT_ADDR32(pfioc_states),
1643 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1644 PFIOCX_STRUCT_END(pfioc_states, addr);
1645 break;
1646 }
1647
1648 case DIOCGETSTATUS: { /* struct pf_status */
1649 struct pf_status *s = NULL;
1650
1651 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; );
1652 pfi_update_status(s->ifname, s);
1653 PFIOC_STRUCT_END(s, addr);
1654 break;
1655 }
1656
1657 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1658 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1659
1660 /* OK for unaligned accesses */
1661 if (pi->ifname[0] == 0) {
1662 bzero(pf_status.ifname, IFNAMSIZ);
1663 break;
1664 }
1665 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1666 break;
1667 }
1668
1669 case DIOCCLRSTATUS: {
1670 bzero(pf_status.counters, sizeof(pf_status.counters));
1671 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1672 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1673 pf_status.since = pf_calendar_time_second();
1674 if (*pf_status.ifname) {
1675 pfi_update_status(pf_status.ifname, NULL);
1676 }
1677 break;
1678 }
1679
1680 case DIOCNATLOOK: { /* struct pfioc_natlook */
1681 struct pfioc_natlook *pnl = NULL;
1682
1683 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; );
1684 error = pfioctl_ioc_natlook(cmd, pnl, p);
1685 PFIOC_STRUCT_END(pnl, addr);
1686 break;
1687 }
1688
1689 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1690 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1691 struct pfioc_tm pt;
1692
1693 /* small enough to be on stack */
1694 bcopy(addr, &pt, sizeof(pt));
1695 error = pfioctl_ioc_tm(cmd, &pt, p);
1696 bcopy(&pt, addr, sizeof(pt));
1697 break;
1698 }
1699
1700 case DIOCGETLIMIT: /* struct pfioc_limit */
1701 case DIOCSETLIMIT: { /* struct pfioc_limit */
1702 struct pfioc_limit pl;
1703
1704 /* small enough to be on stack */
1705 bcopy(addr, &pl, sizeof(pl));
1706 error = pfioctl_ioc_limit(cmd, &pl, p);
1707 bcopy(&pl, addr, sizeof(pl));
1708 break;
1709 }
1710
1711 case DIOCSETDEBUG: { /* u_int32_t */
1712 bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
1713 break;
1714 }
1715
1716 case DIOCCLRRULECTRS: {
1717 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1718 struct pf_ruleset *ruleset = &pf_main_ruleset;
1719 struct pf_rule *rule;
1720
1721 TAILQ_FOREACH(rule,
1722 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1723 rule->evaluations = 0;
1724 rule->packets[0] = rule->packets[1] = 0;
1725 rule->bytes[0] = rule->bytes[1] = 0;
1726 }
1727 break;
1728 }
1729
1730 case DIOCGIFSPEED: {
1731 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1732 struct pf_ifspeed ps;
1733 struct ifnet *ifp;
1734 u_int64_t baudrate;
1735
1736 if (psp->ifname[0] != '\0') {
1737 /* Can we completely trust user-land? */
1738 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1739 ps.ifname[IFNAMSIZ - 1] = '\0';
1740 ifp = ifunit(ps.ifname);
1741 if (ifp != NULL) {
1742 baudrate = ifp->if_output_bw.max_bw;
1743 bcopy(&baudrate, &psp->baudrate,
1744 sizeof(baudrate));
1745 } else {
1746 error = EINVAL;
1747 }
1748 } else {
1749 error = EINVAL;
1750 }
1751 break;
1752 }
1753
1754 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
1755 case DIOCADDADDR: /* struct pfioc_pooladdr */
1756 case DIOCGETADDRS: /* struct pfioc_pooladdr */
1757 case DIOCGETADDR: /* struct pfioc_pooladdr */
1758 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
1759 struct pfioc_pooladdr *pp = NULL;
1760
1761 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break; )
1762 error = pfioctl_ioc_pooladdr(cmd, pp, p);
1763 PFIOC_STRUCT_END(pp, addr);
1764 break;
1765 }
1766
1767 case DIOCGETRULESETS: /* struct pfioc_ruleset */
1768 case DIOCGETRULESET: { /* struct pfioc_ruleset */
1769 struct pfioc_ruleset *pr = NULL;
1770
1771 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1772 error = pfioctl_ioc_ruleset(cmd, pr, p);
1773 PFIOC_STRUCT_END(pr, addr);
1774 break;
1775 }
1776
1777 case DIOCRCLRTABLES: /* struct pfioc_table */
1778 case DIOCRADDTABLES: /* struct pfioc_table */
1779 case DIOCRDELTABLES: /* struct pfioc_table */
1780 case DIOCRGETTABLES: /* struct pfioc_table */
1781 case DIOCRGETTSTATS: /* struct pfioc_table */
1782 case DIOCRCLRTSTATS: /* struct pfioc_table */
1783 case DIOCRSETTFLAGS: /* struct pfioc_table */
1784 case DIOCRCLRADDRS: /* struct pfioc_table */
1785 case DIOCRADDADDRS: /* struct pfioc_table */
1786 case DIOCRDELADDRS: /* struct pfioc_table */
1787 case DIOCRSETADDRS: /* struct pfioc_table */
1788 case DIOCRGETADDRS: /* struct pfioc_table */
1789 case DIOCRGETASTATS: /* struct pfioc_table */
1790 case DIOCRCLRASTATS: /* struct pfioc_table */
1791 case DIOCRTSTADDRS: /* struct pfioc_table */
1792 case DIOCRINADEFINE: { /* struct pfioc_table */
1793 PFIOCX_STRUCT_DECL(pfioc_table);
1794
1795 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; );
1796 error = pfioctl_ioc_table(cmd,
1797 PFIOCX_STRUCT_ADDR32(pfioc_table),
1798 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
1799 PFIOCX_STRUCT_END(pfioc_table, addr);
1800 break;
1801 }
1802
1803 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
1804 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
1805 struct pf_osfp_ioctl *io = NULL;
1806
1807 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break; );
1808 if (cmd == DIOCOSFPADD) {
1809 error = pf_osfp_add(io);
1810 } else {
1811 VERIFY(cmd == DIOCOSFPGET);
1812 error = pf_osfp_get(io);
1813 }
1814 PFIOC_STRUCT_END(io, addr);
1815 break;
1816 }
1817
1818 case DIOCXBEGIN: /* struct pfioc_trans */
1819 case DIOCXROLLBACK: /* struct pfioc_trans */
1820 case DIOCXCOMMIT: { /* struct pfioc_trans */
1821 PFIOCX_STRUCT_DECL(pfioc_trans);
1822
1823 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break; );
1824 error = pfioctl_ioc_trans(cmd,
1825 PFIOCX_STRUCT_ADDR32(pfioc_trans),
1826 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
1827 PFIOCX_STRUCT_END(pfioc_trans, addr);
1828 break;
1829 }
1830
1831 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
1832 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
1833
1834 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
1835 error = ENOMEM; break; );
1836 error = pfioctl_ioc_src_nodes(cmd,
1837 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
1838 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
1839 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
1840 break;
1841 }
1842
1843 case DIOCCLRSRCNODES: {
1844 struct pf_src_node *n;
1845 struct pf_state *state;
1846
1847 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1848 state->src_node = NULL;
1849 state->nat_src_node = NULL;
1850 }
1851 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
1852 n->expire = 1;
1853 n->states = 0;
1854 }
1855 pf_purge_expired_src_nodes();
1856 pf_status.src_nodes = 0;
1857 break;
1858 }
1859
1860 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
1861 struct pfioc_src_node_kill *psnk = NULL;
1862
1863 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break; );
1864 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
1865 PFIOC_STRUCT_END(psnk, addr);
1866 break;
1867 }
1868
1869 case DIOCSETHOSTID: { /* u_int32_t */
1870 u_int32_t hid;
1871
1872 /* small enough to be on stack */
1873 bcopy(addr, &hid, sizeof(hid));
1874 if (hid == 0) {
1875 pf_status.hostid = random();
1876 } else {
1877 pf_status.hostid = hid;
1878 }
1879 break;
1880 }
1881
1882 case DIOCOSFPFLUSH:
1883 pf_osfp_flush();
1884 break;
1885
1886 case DIOCIGETIFACES: /* struct pfioc_iface */
1887 case DIOCSETIFFLAG: /* struct pfioc_iface */
1888 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
1889 PFIOCX_STRUCT_DECL(pfioc_iface);
1890
1891 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break; );
1892 error = pfioctl_ioc_iface(cmd,
1893 PFIOCX_STRUCT_ADDR32(pfioc_iface),
1894 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
1895 PFIOCX_STRUCT_END(pfioc_iface, addr);
1896 break;
1897 }
1898
1899 default:
1900 error = ENODEV;
1901 break;
1902 }
1903
1904 lck_mtx_unlock(pf_lock);
1905 lck_rw_done(pf_perim_lock);
1906
1907 return error;
1908 }
1909
1910 static int
1911 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1912 struct pfioc_table_64 *io64, struct proc *p)
1913 {
1914 int p64 = proc_is64bit(p);
1915 int error = 0;
1916
1917 if (!p64) {
1918 goto struct32;
1919 }
1920
1921 /*
1922 * 64-bit structure processing
1923 */
1924 switch (cmd) {
1925 case DIOCRCLRTABLES:
1926 if (io64->pfrio_esize != 0) {
1927 error = ENODEV;
1928 break;
1929 }
1930 pfr_table_copyin_cleanup(&io64->pfrio_table);
1931 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1932 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1933 break;
1934
1935 case DIOCRADDTABLES:
1936 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1937 error = ENODEV;
1938 break;
1939 }
1940 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1941 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1942 break;
1943
1944 case DIOCRDELTABLES:
1945 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1946 error = ENODEV;
1947 break;
1948 }
1949 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1950 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1951 break;
1952
1953 case DIOCRGETTABLES:
1954 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1955 error = ENODEV;
1956 break;
1957 }
1958 pfr_table_copyin_cleanup(&io64->pfrio_table);
1959 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
1960 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1961 break;
1962
1963 case DIOCRGETTSTATS:
1964 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
1965 error = ENODEV;
1966 break;
1967 }
1968 pfr_table_copyin_cleanup(&io64->pfrio_table);
1969 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
1970 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1971 break;
1972
1973 case DIOCRCLRTSTATS:
1974 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1975 error = ENODEV;
1976 break;
1977 }
1978 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
1979 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1980 break;
1981
1982 case DIOCRSETTFLAGS:
1983 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1984 error = ENODEV;
1985 break;
1986 }
1987 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
1988 io64->pfrio_setflag, io64->pfrio_clrflag,
1989 &io64->pfrio_nchange, &io64->pfrio_ndel,
1990 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1991 break;
1992
1993 case DIOCRCLRADDRS:
1994 if (io64->pfrio_esize != 0) {
1995 error = ENODEV;
1996 break;
1997 }
1998 pfr_table_copyin_cleanup(&io64->pfrio_table);
1999 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2000 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2001 break;
2002
2003 case DIOCRADDADDRS:
2004 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2005 error = ENODEV;
2006 break;
2007 }
2008 pfr_table_copyin_cleanup(&io64->pfrio_table);
2009 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2010 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2011 PFR_FLAG_USERIOCTL);
2012 break;
2013
2014 case DIOCRDELADDRS:
2015 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2016 error = ENODEV;
2017 break;
2018 }
2019 pfr_table_copyin_cleanup(&io64->pfrio_table);
2020 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2021 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2022 PFR_FLAG_USERIOCTL);
2023 break;
2024
2025 case DIOCRSETADDRS:
2026 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2027 error = ENODEV;
2028 break;
2029 }
2030 pfr_table_copyin_cleanup(&io64->pfrio_table);
2031 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2032 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2033 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2034 PFR_FLAG_USERIOCTL, 0);
2035 break;
2036
2037 case DIOCRGETADDRS:
2038 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2039 error = ENODEV;
2040 break;
2041 }
2042 pfr_table_copyin_cleanup(&io64->pfrio_table);
2043 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2044 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2045 break;
2046
2047 case DIOCRGETASTATS:
2048 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2049 error = ENODEV;
2050 break;
2051 }
2052 pfr_table_copyin_cleanup(&io64->pfrio_table);
2053 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2054 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2055 break;
2056
2057 case DIOCRCLRASTATS:
2058 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2059 error = ENODEV;
2060 break;
2061 }
2062 pfr_table_copyin_cleanup(&io64->pfrio_table);
2063 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2064 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2065 PFR_FLAG_USERIOCTL);
2066 break;
2067
2068 case DIOCRTSTADDRS:
2069 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2070 error = ENODEV;
2071 break;
2072 }
2073 pfr_table_copyin_cleanup(&io64->pfrio_table);
2074 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2075 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2076 PFR_FLAG_USERIOCTL);
2077 break;
2078
2079 case DIOCRINADEFINE:
2080 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2081 error = ENODEV;
2082 break;
2083 }
2084 pfr_table_copyin_cleanup(&io64->pfrio_table);
2085 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2086 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2087 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2088 break;
2089
2090 default:
2091 VERIFY(0);
2092 /* NOTREACHED */
2093 }
2094 goto done;
2095
2096 struct32:
2097 /*
2098 * 32-bit structure processing
2099 */
2100 switch (cmd) {
2101 case DIOCRCLRTABLES:
2102 if (io32->pfrio_esize != 0) {
2103 error = ENODEV;
2104 break;
2105 }
2106 pfr_table_copyin_cleanup(&io32->pfrio_table);
2107 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2108 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2109 break;
2110
2111 case DIOCRADDTABLES:
2112 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2113 error = ENODEV;
2114 break;
2115 }
2116 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2117 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2118 break;
2119
2120 case DIOCRDELTABLES:
2121 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2122 error = ENODEV;
2123 break;
2124 }
2125 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2126 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2127 break;
2128
2129 case DIOCRGETTABLES:
2130 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2131 error = ENODEV;
2132 break;
2133 }
2134 pfr_table_copyin_cleanup(&io32->pfrio_table);
2135 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2136 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2137 break;
2138
2139 case DIOCRGETTSTATS:
2140 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2141 error = ENODEV;
2142 break;
2143 }
2144 pfr_table_copyin_cleanup(&io32->pfrio_table);
2145 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2146 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2147 break;
2148
2149 case DIOCRCLRTSTATS:
2150 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2151 error = ENODEV;
2152 break;
2153 }
2154 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2155 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2156 break;
2157
2158 case DIOCRSETTFLAGS:
2159 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2160 error = ENODEV;
2161 break;
2162 }
2163 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2164 io32->pfrio_setflag, io32->pfrio_clrflag,
2165 &io32->pfrio_nchange, &io32->pfrio_ndel,
2166 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2167 break;
2168
2169 case DIOCRCLRADDRS:
2170 if (io32->pfrio_esize != 0) {
2171 error = ENODEV;
2172 break;
2173 }
2174 pfr_table_copyin_cleanup(&io32->pfrio_table);
2175 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2176 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2177 break;
2178
2179 case DIOCRADDADDRS:
2180 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2181 error = ENODEV;
2182 break;
2183 }
2184 pfr_table_copyin_cleanup(&io32->pfrio_table);
2185 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2186 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2187 PFR_FLAG_USERIOCTL);
2188 break;
2189
2190 case DIOCRDELADDRS:
2191 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2192 error = ENODEV;
2193 break;
2194 }
2195 pfr_table_copyin_cleanup(&io32->pfrio_table);
2196 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2197 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2198 PFR_FLAG_USERIOCTL);
2199 break;
2200
2201 case DIOCRSETADDRS:
2202 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2203 error = ENODEV;
2204 break;
2205 }
2206 pfr_table_copyin_cleanup(&io32->pfrio_table);
2207 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2208 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2209 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2210 PFR_FLAG_USERIOCTL, 0);
2211 break;
2212
2213 case DIOCRGETADDRS:
2214 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2215 error = ENODEV;
2216 break;
2217 }
2218 pfr_table_copyin_cleanup(&io32->pfrio_table);
2219 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2220 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2221 break;
2222
2223 case DIOCRGETASTATS:
2224 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2225 error = ENODEV;
2226 break;
2227 }
2228 pfr_table_copyin_cleanup(&io32->pfrio_table);
2229 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2230 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2231 break;
2232
2233 case DIOCRCLRASTATS:
2234 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2235 error = ENODEV;
2236 break;
2237 }
2238 pfr_table_copyin_cleanup(&io32->pfrio_table);
2239 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2240 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2241 PFR_FLAG_USERIOCTL);
2242 break;
2243
2244 case DIOCRTSTADDRS:
2245 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2246 error = ENODEV;
2247 break;
2248 }
2249 pfr_table_copyin_cleanup(&io32->pfrio_table);
2250 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2251 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2252 PFR_FLAG_USERIOCTL);
2253 break;
2254
2255 case DIOCRINADEFINE:
2256 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2257 error = ENODEV;
2258 break;
2259 }
2260 pfr_table_copyin_cleanup(&io32->pfrio_table);
2261 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2262 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2263 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2264 break;
2265
2266 default:
2267 VERIFY(0);
2268 /* NOTREACHED */
2269 }
2270
2271 done:
2272 return error;
2273 }
2274
/*
 * Handle the token-related PF ioctls (currently only DIOCGETSTARTERS):
 * snapshot the kernel's list of "starter" tokens into a caller-supplied
 * user buffer.  tok32/tok64 are the 32-bit and 64-bit user views of the
 * same request; which one is live depends on the calling process ABI.
 * Returns 0 or an errno.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof(struct pfioc_token) * nr_tokens;
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/*
			 * Size probe: caller passed 0 to learn the buffer
			 * size needed for all tokens; report it and return.
			 */
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
		tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* Flatten each kernel token into the temporary array. */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break; /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* (ocnt - cnt) is the number of bytes actually filled in. */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2351
2352 static void
2353 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2354 {
2355 struct pf_state *state;
2356 struct pf_src_node *sn;
2357 int killed = 0;
2358
2359 /* expire the states */
2360 state = TAILQ_FIRST(&state_list);
2361 while (state) {
2362 if (state->rule.ptr == rule) {
2363 state->timeout = PFTM_PURGE;
2364 }
2365 state = TAILQ_NEXT(state, entry_list);
2366 }
2367 pf_purge_expired_states(pf_status.states);
2368
2369 /* expire the src_nodes */
2370 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2371 if (sn->rule.ptr != rule) {
2372 continue;
2373 }
2374 if (sn->states != 0) {
2375 RB_FOREACH(state, pf_state_tree_id,
2376 &tree_id) {
2377 if (state->src_node == sn) {
2378 state->src_node = NULL;
2379 }
2380 if (state->nat_src_node == sn) {
2381 state->nat_src_node = NULL;
2382 }
2383 }
2384 sn->states = 0;
2385 }
2386 sn->expire = 1;
2387 killed++;
2388 }
2389 if (killed) {
2390 pf_purge_expired_src_nodes();
2391 }
2392 }
2393
2394 static void
2395 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2396 struct pf_rule *rule)
2397 {
2398 struct pf_rule *r;
2399 int nr = 0;
2400
2401 pf_expire_states_and_src_nodes(rule);
2402
2403 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2404 if (ruleset->rules[rs_num].active.rcount-- == 0) {
2405 panic("%s: rcount value broken!", __func__);
2406 }
2407 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2408
2409 while (r) {
2410 r->nr = nr++;
2411 r = TAILQ_NEXT(r, entries);
2412 }
2413 }
2414
2415
2416 static void
2417 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2418 {
2419 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2420 ruleset->rules[rs].active.ticket =
2421 ++ruleset->rules[rs].inactive.ticket;
2422 }
2423
2424 /*
2425 * req_dev encodes the PF interface. Currently, possible values are
2426 * 0 or PFRULE_PFM
2427 */
/*
 * Delete the rule identified by pr->rule.ticket (and owned by
 * pr->rule.owner) from the ruleset named by pr->anchor.  If that leaves
 * an unowned anchor empty, the anchor rule in the parent ruleset is
 * deleted too, repeating up the anchor chain.  req_dev encodes the
 * requesting PF device (0 or PFRULE_PFM); a rule may only be deleted
 * through the same device that installed it.  Returns 0 or an errno.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	/* Search every ruleset type for the rule carrying this ticket. */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		/* undo the final i++ so i indexes the matching ruleset */
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* find the anchor rule in the parent pointing at us */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		/* the parent anchor rule may itself now be empty: loop */
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}
2523
2524 /*
2525 * req_dev encodes the PF interface. Currently, possible values are
2526 * 0 or PFRULE_PFM
2527 */
/*
 * Delete every rule belonging to 'owner', across all ruleset types,
 * descending into anchors whose rules are owned by 'owner' or unowned.
 * req_dev encodes the requesting PF device (0 or PFRULE_PFM); only
 * rules installed through the matching device are considered.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* anchor is empty: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				/* end of list: clean up and pop out of the anchor */
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2598
2599 static void
2600 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2601 int rs, struct pf_rule **rule_ptr)
2602 {
2603 struct pf_ruleset *ruleset = *ruleset_ptr;
2604 struct pf_rule *rule = *rule_ptr;
2605
2606 /* step out of anchor */
2607 struct pf_ruleset *rs_copy = ruleset;
2608 ruleset = ruleset->anchor->parent?
2609 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2610
2611 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2612 while (rule && (rule->anchor != rs_copy->anchor)) {
2613 rule = TAILQ_NEXT(rule, entries);
2614 }
2615 if (rule == NULL) {
2616 panic("%s: parent rule of anchor not found!", __func__);
2617 }
2618 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2619 rule = TAILQ_NEXT(rule, entries);
2620 }
2621
2622 *ruleset_ptr = ruleset;
2623 *rule_ptr = rule;
2624 }
2625
2626 static void
2627 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2628 {
2629 VERIFY(aw);
2630 bzero(&aw->p, sizeof aw->p);
2631 }
2632
/*
 * Resolve and validate all dynamic pieces of a freshly copied-in rule
 * (interface, tags, addresses, tables, anchor, address pool) on behalf
 * of DIOCADDRULE/DIOCINSERTRULE.  On failure the rule is freed here
 * (pool_put or pf_rm_rule) and an errno is returned; on success the
 * address pool is attached, counters are zeroed, and 0 is returned.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr *apa;
	int error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	/*
	 * From here on errors accumulate in 'error'; cleanup happens
	 * once at the bottom via pf_rm_rule().
	 */
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to and friends make no sense without a direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation/routing rules (outside an anchor) require a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2730
2731 static int
2732 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2733 {
2734 int error = 0;
2735 u_int32_t req_dev = 0;
2736
2737 switch (cmd) {
2738 case DIOCADDRULE: {
2739 struct pf_ruleset *ruleset;
2740 struct pf_rule *rule, *tail;
2741 int rs_num;
2742
2743 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2744 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2745 ruleset = pf_find_ruleset(pr->anchor);
2746 if (ruleset == NULL) {
2747 error = EINVAL;
2748 break;
2749 }
2750 rs_num = pf_get_ruleset_number(pr->rule.action);
2751 if (rs_num >= PF_RULESET_MAX) {
2752 error = EINVAL;
2753 break;
2754 }
2755 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2756 error = EINVAL;
2757 break;
2758 }
2759 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2760 error = EBUSY;
2761 break;
2762 }
2763 if (pr->pool_ticket != ticket_pabuf) {
2764 error = EBUSY;
2765 break;
2766 }
2767 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2768 if (rule == NULL) {
2769 error = ENOMEM;
2770 break;
2771 }
2772 pf_rule_copyin(&pr->rule, rule, p, minordev);
2773 #if !INET
2774 if (rule->af == AF_INET) {
2775 pool_put(&pf_rule_pl, rule);
2776 error = EAFNOSUPPORT;
2777 break;
2778 }
2779 #endif /* INET */
2780 #if !INET6
2781 if (rule->af == AF_INET6) {
2782 pool_put(&pf_rule_pl, rule);
2783 error = EAFNOSUPPORT;
2784 break;
2785 }
2786 #endif /* INET6 */
2787 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2788 pf_rulequeue);
2789 if (tail) {
2790 rule->nr = tail->nr + 1;
2791 } else {
2792 rule->nr = 0;
2793 }
2794
2795 if ((error = pf_rule_setup(pr, rule, ruleset))) {
2796 break;
2797 }
2798
2799 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2800 rule, entries);
2801 ruleset->rules[rs_num].inactive.rcount++;
2802 if (rule->rule_flag & PFRULE_PFM) {
2803 pffwrules++;
2804 }
2805
2806 if (rule->action == PF_NAT64) {
2807 atomic_add_16(&pf_nat64_configured, 1);
2808 }
2809
2810 if (pr->anchor_call[0] == '\0') {
2811 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2812 if (rule->rule_flag & PFRULE_PFM) {
2813 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2814 }
2815 }
2816
2817 #if DUMMYNET
2818 if (rule->action == PF_DUMMYNET) {
2819 struct dummynet_event dn_event;
2820 uint32_t direction = DN_INOUT;;
2821 bzero(&dn_event, sizeof(dn_event));
2822
2823 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2824
2825 if (rule->direction == PF_IN) {
2826 direction = DN_IN;
2827 } else if (rule->direction == PF_OUT) {
2828 direction = DN_OUT;
2829 }
2830
2831 dn_event.dn_event_rule_config.dir = direction;
2832 dn_event.dn_event_rule_config.af = rule->af;
2833 dn_event.dn_event_rule_config.proto = rule->proto;
2834 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2835 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2836 strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2837 sizeof(dn_event.dn_event_rule_config.ifname));
2838
2839 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2840 }
2841 #endif
2842 break;
2843 }
2844
2845 case DIOCGETRULES: {
2846 struct pf_ruleset *ruleset;
2847 struct pf_rule *tail;
2848 int rs_num;
2849
2850 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2851 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2852 ruleset = pf_find_ruleset(pr->anchor);
2853 if (ruleset == NULL) {
2854 error = EINVAL;
2855 break;
2856 }
2857 rs_num = pf_get_ruleset_number(pr->rule.action);
2858 if (rs_num >= PF_RULESET_MAX) {
2859 error = EINVAL;
2860 break;
2861 }
2862 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2863 pf_rulequeue);
2864 if (tail) {
2865 pr->nr = tail->nr + 1;
2866 } else {
2867 pr->nr = 0;
2868 }
2869 pr->ticket = ruleset->rules[rs_num].active.ticket;
2870 break;
2871 }
2872
2873 case DIOCGETRULE: {
2874 struct pf_ruleset *ruleset;
2875 struct pf_rule *rule;
2876 int rs_num, i;
2877
2878 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2879 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2880 ruleset = pf_find_ruleset(pr->anchor);
2881 if (ruleset == NULL) {
2882 error = EINVAL;
2883 break;
2884 }
2885 rs_num = pf_get_ruleset_number(pr->rule.action);
2886 if (rs_num >= PF_RULESET_MAX) {
2887 error = EINVAL;
2888 break;
2889 }
2890 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2891 error = EBUSY;
2892 break;
2893 }
2894 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2895 while ((rule != NULL) && (rule->nr != pr->nr)) {
2896 rule = TAILQ_NEXT(rule, entries);
2897 }
2898 if (rule == NULL) {
2899 error = EBUSY;
2900 break;
2901 }
2902 pf_rule_copyout(rule, &pr->rule);
2903 if (pf_anchor_copyout(ruleset, rule, pr)) {
2904 error = EBUSY;
2905 break;
2906 }
2907 pfi_dynaddr_copyout(&pr->rule.src.addr);
2908 pfi_dynaddr_copyout(&pr->rule.dst.addr);
2909 pf_tbladdr_copyout(&pr->rule.src.addr);
2910 pf_tbladdr_copyout(&pr->rule.dst.addr);
2911 pf_rtlabel_copyout(&pr->rule.src.addr);
2912 pf_rtlabel_copyout(&pr->rule.dst.addr);
2913 for (i = 0; i < PF_SKIP_COUNT; ++i) {
2914 if (rule->skip[i].ptr == NULL) {
2915 pr->rule.skip[i].nr = -1;
2916 } else {
2917 pr->rule.skip[i].nr =
2918 rule->skip[i].ptr->nr;
2919 }
2920 }
2921
2922 if (pr->action == PF_GET_CLR_CNTR) {
2923 rule->evaluations = 0;
2924 rule->packets[0] = rule->packets[1] = 0;
2925 rule->bytes[0] = rule->bytes[1] = 0;
2926 }
2927 break;
2928 }
2929
2930 case DIOCCHANGERULE: {
2931 struct pfioc_rule *pcr = pr;
2932 struct pf_ruleset *ruleset;
2933 struct pf_rule *oldrule = NULL, *newrule = NULL;
2934 struct pf_pooladdr *pa;
2935 u_int32_t nr = 0;
2936 int rs_num;
2937
2938 if (!(pcr->action == PF_CHANGE_REMOVE ||
2939 pcr->action == PF_CHANGE_GET_TICKET) &&
2940 pcr->pool_ticket != ticket_pabuf) {
2941 error = EBUSY;
2942 break;
2943 }
2944
2945 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2946 pcr->action > PF_CHANGE_GET_TICKET) {
2947 error = EINVAL;
2948 break;
2949 }
2950 pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2951 pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
2952 ruleset = pf_find_ruleset(pcr->anchor);
2953 if (ruleset == NULL) {
2954 error = EINVAL;
2955 break;
2956 }
2957 rs_num = pf_get_ruleset_number(pcr->rule.action);
2958 if (rs_num >= PF_RULESET_MAX) {
2959 error = EINVAL;
2960 break;
2961 }
2962
2963 if (pcr->action == PF_CHANGE_GET_TICKET) {
2964 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2965 break;
2966 } else {
2967 if (pcr->ticket !=
2968 ruleset->rules[rs_num].active.ticket) {
2969 error = EINVAL;
2970 break;
2971 }
2972 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2973 error = EINVAL;
2974 break;
2975 }
2976 }
2977
2978 if (pcr->action != PF_CHANGE_REMOVE) {
2979 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
2980 if (newrule == NULL) {
2981 error = ENOMEM;
2982 break;
2983 }
2984 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
2985 #if !INET
2986 if (newrule->af == AF_INET) {
2987 pool_put(&pf_rule_pl, newrule);
2988 error = EAFNOSUPPORT;
2989 break;
2990 }
2991 #endif /* INET */
2992 #if !INET6
2993 if (newrule->af == AF_INET6) {
2994 pool_put(&pf_rule_pl, newrule);
2995 error = EAFNOSUPPORT;
2996 break;
2997 }
2998 #endif /* INET6 */
2999 if (newrule->ifname[0]) {
3000 newrule->kif = pfi_kif_get(newrule->ifname);
3001 if (newrule->kif == NULL) {
3002 pool_put(&pf_rule_pl, newrule);
3003 error = EINVAL;
3004 break;
3005 }
3006 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3007 } else {
3008 newrule->kif = NULL;
3009 }
3010
3011 if (newrule->tagname[0]) {
3012 if ((newrule->tag =
3013 pf_tagname2tag(newrule->tagname)) == 0) {
3014 error = EBUSY;
3015 }
3016 }
3017 if (newrule->match_tagname[0]) {
3018 if ((newrule->match_tag = pf_tagname2tag(
3019 newrule->match_tagname)) == 0) {
3020 error = EBUSY;
3021 }
3022 }
3023 if (newrule->rt && !newrule->direction) {
3024 error = EINVAL;
3025 }
3026 #if PFLOG
3027 if (!newrule->log) {
3028 newrule->logif = 0;
3029 }
3030 if (newrule->logif >= PFLOGIFS_MAX) {
3031 error = EINVAL;
3032 }
3033 #endif /* PFLOG */
3034 pf_addrwrap_setup(&newrule->src.addr);
3035 pf_addrwrap_setup(&newrule->dst.addr);
3036 if (pf_rtlabel_add(&newrule->src.addr) ||
3037 pf_rtlabel_add(&newrule->dst.addr)) {
3038 error = EBUSY;
3039 }
3040 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3041 error = EINVAL;
3042 }
3043 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3044 error = EINVAL;
3045 }
3046 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3047 error = EINVAL;
3048 }
3049 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3050 error = EINVAL;
3051 }
3052 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3053 error = EINVAL;
3054 }
3055 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3056 if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3057 error = EINVAL;
3058 }
3059
3060 if (newrule->overload_tblname[0]) {
3061 if ((newrule->overload_tbl = pfr_attach_table(
3062 ruleset, newrule->overload_tblname)) ==
3063 NULL) {
3064 error = EINVAL;
3065 } else {
3066 newrule->overload_tbl->pfrkt_flags |=
3067 PFR_TFLAG_ACTIVE;
3068 }
3069 }
3070
3071 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3072 if (((((newrule->action == PF_NAT) ||
3073 (newrule->action == PF_RDR) ||
3074 (newrule->action == PF_BINAT) ||
3075 (newrule->rt > PF_FASTROUTE)) &&
3076 !newrule->anchor)) &&
3077 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3078 error = EINVAL;
3079 }
3080
3081 if (error) {
3082 pf_rm_rule(NULL, newrule);
3083 break;
3084 }
3085 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3086 newrule->evaluations = 0;
3087 newrule->packets[0] = newrule->packets[1] = 0;
3088 newrule->bytes[0] = newrule->bytes[1] = 0;
3089 }
3090 pf_empty_pool(&pf_pabuf);
3091
3092 if (pcr->action == PF_CHANGE_ADD_HEAD) {
3093 oldrule = TAILQ_FIRST(
3094 ruleset->rules[rs_num].active.ptr);
3095 } else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3096 oldrule = TAILQ_LAST(
3097 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3098 } else {
3099 oldrule = TAILQ_FIRST(
3100 ruleset->rules[rs_num].active.ptr);
3101 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3102 oldrule = TAILQ_NEXT(oldrule, entries);
3103 }
3104 if (oldrule == NULL) {
3105 if (newrule != NULL) {
3106 pf_rm_rule(NULL, newrule);
3107 }
3108 error = EINVAL;
3109 break;
3110 }
3111 }
3112
3113 if (pcr->action == PF_CHANGE_REMOVE) {
3114 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3115 ruleset->rules[rs_num].active.rcount--;
3116 } else {
3117 if (oldrule == NULL) {
3118 TAILQ_INSERT_TAIL(
3119 ruleset->rules[rs_num].active.ptr,
3120 newrule, entries);
3121 } else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3122 pcr->action == PF_CHANGE_ADD_BEFORE) {
3123 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3124 } else {
3125 TAILQ_INSERT_AFTER(
3126 ruleset->rules[rs_num].active.ptr,
3127 oldrule, newrule, entries);
3128 }
3129 ruleset->rules[rs_num].active.rcount++;
3130 }
3131
3132 nr = 0;
3133 TAILQ_FOREACH(oldrule,
3134 ruleset->rules[rs_num].active.ptr, entries)
3135 oldrule->nr = nr++;
3136
3137 ruleset->rules[rs_num].active.ticket++;
3138
3139 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3140 pf_remove_if_empty_ruleset(ruleset);
3141
3142 break;
3143 }
3144
3145 case DIOCINSERTRULE: {
3146 struct pf_ruleset *ruleset;
3147 struct pf_rule *rule, *tail, *r;
3148 int rs_num;
3149 int is_anchor;
3150
3151 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3152 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3153 is_anchor = (pr->anchor_call[0] != '\0');
3154
3155 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3156 pr->rule.owner, is_anchor, &error)) == NULL) {
3157 break;
3158 }
3159
3160 rs_num = pf_get_ruleset_number(pr->rule.action);
3161 if (rs_num >= PF_RULESET_MAX) {
3162 error = EINVAL;
3163 break;
3164 }
3165 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3166 error = EINVAL;
3167 break;
3168 }
3169
3170 /* make sure this anchor rule doesn't exist already */
3171 if (is_anchor) {
3172 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3173 while (r) {
3174 if (r->anchor &&
3175 ((strcmp(r->anchor->name,
3176 pr->anchor_call)) == 0)) {
3177 if (((strcmp(pr->rule.owner,
3178 r->owner)) == 0) ||
3179 ((strcmp(r->owner, "")) == 0)) {
3180 error = EEXIST;
3181 } else {
3182 error = EPERM;
3183 }
3184 break;
3185 }
3186 r = TAILQ_NEXT(r, entries);
3187 }
3188 if (error != 0) {
3189 return error;
3190 }
3191 }
3192
3193 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3194 if (rule == NULL) {
3195 error = ENOMEM;
3196 break;
3197 }
3198 pf_rule_copyin(&pr->rule, rule, p, minordev);
3199 #if !INET
3200 if (rule->af == AF_INET) {
3201 pool_put(&pf_rule_pl, rule);
3202 error = EAFNOSUPPORT;
3203 break;
3204 }
3205 #endif /* INET */
3206 #if !INET6
3207 if (rule->af == AF_INET6) {
3208 pool_put(&pf_rule_pl, rule);
3209 error = EAFNOSUPPORT;
3210 break;
3211 }
3212
3213 #endif /* INET6 */
3214 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3215 while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3216 r = TAILQ_NEXT(r, entries);
3217 }
3218 if (r == NULL) {
3219 if ((tail =
3220 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3221 pf_rulequeue)) != NULL) {
3222 rule->nr = tail->nr + 1;
3223 } else {
3224 rule->nr = 0;
3225 }
3226 } else {
3227 rule->nr = r->nr;
3228 }
3229
3230 if ((error = pf_rule_setup(pr, rule, ruleset))) {
3231 break;
3232 }
3233
3234 if (rule->anchor != NULL) {
3235 strlcpy(rule->anchor->owner, rule->owner,
3236 PF_OWNER_NAME_SIZE);
3237 }
3238
3239 if (r) {
3240 TAILQ_INSERT_BEFORE(r, rule, entries);
3241 while (r && ++r->nr) {
3242 r = TAILQ_NEXT(r, entries);
3243 }
3244 } else {
3245 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3246 rule, entries);
3247 }
3248 ruleset->rules[rs_num].active.rcount++;
3249
3250 /* Calculate checksum for the main ruleset */
3251 if (ruleset == &pf_main_ruleset) {
3252 error = pf_setup_pfsync_matching(ruleset);
3253 }
3254
3255 pf_ruleset_cleanup(ruleset, rs_num);
3256 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3257
3258 pr->rule.ticket = rule->ticket;
3259 pf_rule_copyout(rule, &pr->rule);
3260 if (rule->rule_flag & PFRULE_PFM) {
3261 pffwrules++;
3262 }
3263 if (rule->action == PF_NAT64) {
3264 atomic_add_16(&pf_nat64_configured, 1);
3265 }
3266
3267 if (pr->anchor_call[0] == '\0') {
3268 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3269 if (rule->rule_flag & PFRULE_PFM) {
3270 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3271 }
3272 }
3273 break;
3274 }
3275
3276 case DIOCDELETERULE: {
3277 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3278 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3279
3280 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3281 error = EINVAL;
3282 break;
3283 }
3284
3285 /* get device through which request is made */
3286 if ((uint8_t)minordev == PFDEV_PFM) {
3287 req_dev |= PFRULE_PFM;
3288 }
3289
3290 if (pr->rule.ticket) {
3291 if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3292 break;
3293 }
3294 } else {
3295 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3296 }
3297 pr->nr = pffwrules;
3298 if (pr->rule.action == PF_NAT64) {
3299 atomic_add_16(&pf_nat64_configured, -1);
3300 }
3301 break;
3302 }
3303
3304 default:
3305 VERIFY(0);
3306 /* NOTREACHED */
3307 }
3308
3309 return error;
3310 }
3311
/*
 * Handle the state-kill PF ioctls.  DIOCCLRSTATES purges every state,
 * optionally filtered by interface name and/or rule owner name;
 * DIOCKILLSTATES purges only the states matching the full address/
 * port/protocol criteria in *psk.  The number of states killed is
 * handed back to userland in psk->psk_af.  Returns 0 or an errno.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill count on the way out */
		psk->psk_af = killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/* orient src/dst to match the caller's point of view */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* zero af/proto in the request act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3438
/*
 * Handle the single-state ioctls: DIOCADDSTATE (insert a state supplied
 * by userland, e.g. from a pfsync peer) and DIOCGETSTATE (export one
 * state looked up by id/creatorid).  Called with the PF lock held by
 * the top-level ioctl dispatcher.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* reject timeout indices outside the known PFTM_* range */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* allocate a key and link it to s */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		/* populate s/sk from the user-supplied record */
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only s is returned to its pool here,
			 * not sk — presumably pf_insert_state disposes of the
			 * key on failure; confirm against pf.c.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* key the lookup on the 64-bit state id + creator id */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* copy the state back into the caller's pfioc_state */
		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3512
/*
 * DIOCGETSTATES: dump all (non-unlinked) states into the caller's
 * buffer.  Takes both the 32-bit and 64-bit layouts of pfioc_states;
 * p decides which one is live.  Convention: a zero ps_len is a size
 * probe — report the space needed and copy nothing.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {           /* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size probe: report the worst-case buffer size */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* one scratch record, reused for every copyout below */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				/* stop once the user buffer is full */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3584
/*
 * DIOCNATLOOK: given the (proto, src, dst) of a connection, find the
 * matching PF state and report the translated (NAT) addresses/ports
 * back to userland.  The lookup is performed on the *return* traffic
 * direction, since that is how the state is keyed in the tree.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/* proto, both addresses, and (for TCP/UDP) both ports
		 * are mandatory */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* inbound: key on the gateway-side pair */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* outbound: key on the LAN-side pair */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				/* report the translated endpoint; the
				 * untranslated side is echoed back */
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3673
3674 static int
3675 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3676 {
3677 #pragma unused(p)
3678 int error = 0;
3679
3680 switch (cmd) {
3681 case DIOCSETTIMEOUT: {
3682 int old;
3683
3684 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3685 pt->seconds < 0) {
3686 error = EINVAL;
3687 goto fail;
3688 }
3689 old = pf_default_rule.timeout[pt->timeout];
3690 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3691 pt->seconds = 1;
3692 }
3693 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3694 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3695 wakeup(pf_purge_thread_fn);
3696 }
3697 pt->seconds = old;
3698 break;
3699 }
3700
3701 case DIOCGETTIMEOUT: {
3702 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3703 error = EINVAL;
3704 goto fail;
3705 }
3706 pt->seconds = pf_default_rule.timeout[pt->timeout];
3707 break;
3708 }
3709
3710 default:
3711 VERIFY(0);
3712 /* NOTREACHED */
3713 }
3714 fail:
3715 return error;
3716 }
3717
3718 static int
3719 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3720 {
3721 #pragma unused(p)
3722 int error = 0;
3723
3724 switch (cmd) {
3725 case DIOCGETLIMIT: {
3726 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3727 error = EINVAL;
3728 goto fail;
3729 }
3730 pl->limit = pf_pool_limits[pl->index].limit;
3731 break;
3732 }
3733
3734 case DIOCSETLIMIT: {
3735 int old_limit;
3736
3737 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3738 pf_pool_limits[pl->index].pp == NULL) {
3739 error = EINVAL;
3740 goto fail;
3741 }
3742 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3743 pl->limit, NULL, 0);
3744 old_limit = pf_pool_limits[pl->index].limit;
3745 pf_pool_limits[pl->index].limit = pl->limit;
3746 pl->limit = old_limit;
3747 break;
3748 }
3749
3750 default:
3751 VERIFY(0);
3752 /* NOTREACHED */
3753 }
3754 fail:
3755 return error;
3756 }
3757
/*
 * Handle the pool-address ioctls: stage addresses for a rule being
 * loaded (DIOCBEGINADDRS/DIOCADDADDR into pf_pabuf), enumerate an
 * existing rule's pool (DIOCGETADDRS/DIOCGETADDR), and edit a pool
 * in place (DIOCCHANGEADDR).
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* start a fresh staging transaction: drop anything left
		 * in the buffer and issue a new ticket */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* caller must hold the ticket from DIOCBEGINADDRS */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#if !INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		/* only addrmask / dynamic-interface / table addresses
		 * make sense in a pool */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* bind to a kif and take a rule reference */
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* undo the partial setup on failure */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* count the addresses in the rule's pool */
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* walk to the pp->nr'th address */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		/* resolve dynamic/table/label info for userland */
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* build the replacement/insert entry first */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#if !INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* locate the list position the action refers to */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		/* restart round-robin from the head of the new list */
		pool->cur = TAILQ_FIRST(&pool->list);
		/*
		 * NOTE(review): if PF_CHANGE_REMOVE just deleted the last
		 * address, pool->cur is NULL and the dereference below
		 * would panic — presumably callers never empty a pool this
		 * way; confirm.
		 */
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3985
3986 static int
3987 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
3988 {
3989 #pragma unused(p)
3990 int error = 0;
3991
3992 switch (cmd) {
3993 case DIOCGETRULESETS: {
3994 struct pf_ruleset *ruleset;
3995 struct pf_anchor *anchor;
3996
3997 pr->path[sizeof(pr->path) - 1] = '\0';
3998 pr->name[sizeof(pr->name) - 1] = '\0';
3999 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4000 error = EINVAL;
4001 break;
4002 }
4003 pr->nr = 0;
4004 if (ruleset->anchor == NULL) {
4005 /* XXX kludge for pf_main_ruleset */
4006 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4007 if (anchor->parent == NULL) {
4008 pr->nr++;
4009 }
4010 } else {
4011 RB_FOREACH(anchor, pf_anchor_node,
4012 &ruleset->anchor->children)
4013 pr->nr++;
4014 }
4015 break;
4016 }
4017
4018 case DIOCGETRULESET: {
4019 struct pf_ruleset *ruleset;
4020 struct pf_anchor *anchor;
4021 u_int32_t nr = 0;
4022
4023 pr->path[sizeof(pr->path) - 1] = '\0';
4024 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4025 error = EINVAL;
4026 break;
4027 }
4028 pr->name[0] = 0;
4029 if (ruleset->anchor == NULL) {
4030 /* XXX kludge for pf_main_ruleset */
4031 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4032 if (anchor->parent == NULL && nr++ == pr->nr) {
4033 strlcpy(pr->name, anchor->name,
4034 sizeof(pr->name));
4035 break;
4036 }
4037 } else {
4038 RB_FOREACH(anchor, pf_anchor_node,
4039 &ruleset->anchor->children)
4040 if (nr++ == pr->nr) {
4041 strlcpy(pr->name, anchor->name,
4042 sizeof(pr->name));
4043 break;
4044 }
4045 }
4046 if (!pr->name[0]) {
4047 error = EBUSY;
4048 }
4049 break;
4050 }
4051
4052 default:
4053 VERIFY(0);
4054 /* NOTREACHED */
4055 }
4056
4057 return error;
4058 }
4059
/*
 * Handle the transactional ruleset ioctls (DIOCXBEGIN / DIOCXROLLBACK /
 * DIOCXCOMMIT).  The caller supplies an array of pfioc_trans_e
 * elements; each names a ruleset kind (rs_num) and an anchor.  Takes
 * both the 32-bit and 64-bit ioctl layouts; p decides which is live.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0, esize, size;
	user_addr_t buf;

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* element size must match or the copyin loop is garbage */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* open an inactive ruleset/table per element and return
		 * the tickets to userland */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ not supported here; accept as no-op */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* write the issued ticket back into the array */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* throw away every inactive ruleset/table in the array */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;          /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;          /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* ticket must match the open transaction */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* rewind to re-read the array for the commit pass */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;          /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;          /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4277
/*
 * DIOCGETSRCNODES: dump the source-tracking tree into the caller's
 * buffer.  Takes both the 32-bit and 64-bit layouts of
 * pfioc_src_nodes; p decides which is live.  Convention: a zero
 * psn_len is a size probe — report the space needed and copy nothing.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* size probe: count the nodes, report the bytes */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* one scratch record, reused for every copyout below */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the user buffer is full */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* convert absolute timestamps to relative ages */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* scrub kernel pointers before leaking to userland */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4372
4373 static int
4374 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4375 struct proc *p)
4376 {
4377 #pragma unused(p)
4378 int error = 0;
4379
4380 switch (cmd) {
4381 case DIOCKILLSRCNODES: {
4382 struct pf_src_node *sn;
4383 struct pf_state *s;
4384 int killed = 0;
4385
4386 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4387 if (PF_MATCHA(psnk->psnk_src.neg,
4388 &psnk->psnk_src.addr.v.a.addr,
4389 &psnk->psnk_src.addr.v.a.mask,
4390 &sn->addr, sn->af) &&
4391 PF_MATCHA(psnk->psnk_dst.neg,
4392 &psnk->psnk_dst.addr.v.a.addr,
4393 &psnk->psnk_dst.addr.v.a.mask,
4394 &sn->raddr, sn->af)) {
4395 /* Handle state to src_node linkage */
4396 if (sn->states != 0) {
4397 RB_FOREACH(s, pf_state_tree_id,
4398 &tree_id) {
4399 if (s->src_node == sn) {
4400 s->src_node = NULL;
4401 }
4402 if (s->nat_src_node == sn) {
4403 s->nat_src_node = NULL;
4404 }
4405 }
4406 sn->states = 0;
4407 }
4408 sn->expire = 1;
4409 killed++;
4410 }
4411 }
4412
4413 if (killed > 0) {
4414 pf_purge_expired_src_nodes();
4415 }
4416
4417 psnk->psnk_af = killed;
4418 break;
4419 }
4420
4421 default:
4422 VERIFY(0);
4423 /* NOTREACHED */
4424 }
4425
4426 return error;
4427 }
4428
4429 static int
4430 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4431 struct pfioc_iface_64 *io64, struct proc *p)
4432 {
4433 int p64 = proc_is64bit(p);
4434 int error = 0;
4435
4436 switch (cmd) {
4437 case DIOCIGETIFACES: {
4438 user_addr_t buf;
4439 int esize;
4440
4441 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4442 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4443
4444 /* esize must be that of the user space version of pfi_kif */
4445 if (esize != sizeof(struct pfi_uif)) {
4446 error = ENODEV;
4447 break;
4448 }
4449 if (p64) {
4450 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4451 } else {
4452 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4453 }
4454 error = pfi_get_ifaces(
4455 p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4456 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4457 break;
4458 }
4459
4460 case DIOCSETIFFLAG: {
4461 if (p64) {
4462 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4463 } else {
4464 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4465 }
4466
4467 error = pfi_set_flags(
4468 p64 ? io64->pfiio_name : io32->pfiio_name,
4469 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4470 break;
4471 }
4472
4473 case DIOCCLRIFFLAG: {
4474 if (p64) {
4475 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4476 } else {
4477 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4478 }
4479
4480 error = pfi_clear_flags(
4481 p64 ? io64->pfiio_name : io32->pfiio_name,
4482 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4483 break;
4484 }
4485
4486 default:
4487 VERIFY(0);
4488 /* NOTREACHED */
4489 }
4490
4491 return error;
4492 }
4493
/*
 * Main PF entry point from the IP input/output paths.  Detaches *mp
 * from any packet chain, runs it through the AF-specific PF hook, and
 * re-splices whatever survives back into the chain.  mppn, if non-NULL,
 * points at the previous packet's m_nextpkt link and is fixed up to
 * skip packets PF consumed.  Returns 0 or an errno from the hook.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * A non-none mark means this thread did not already hold PF;
	 * take the locks here.  If PF is disabled, bail out with only
	 * the perimeter lock taken (released at "done").
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* detach this packet from the chain; PF works on one packet */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry caues issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
#endif /* INET6 */
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4586
4587
#if INET
/*
 * IPv4 leg of pf_af_hook.  Finalizes any outstanding delayed checksum,
 * byte-swaps the header fields PF expects in network order, runs
 * pf_test_mbuf, and swaps back on surviving packets.  Returns 0 on
 * pass, EHOSTUNREACH when PF dropped the packet, ENOBUFS when PF
 * consumed it outright.
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* PF expects ip_len/ip_off in network byte order */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF said drop: free the packet ourselves */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF already consumed the mbuf */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* restore host byte order; PF may have replaced *mp */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4642
#if INET6
/*
 * IPv6 leg of pf_af_hook.  Finalizes any outstanding delayed checksum
 * and runs the packet through pf_test6_mbuf.  Returns 0 on pass,
 * EHOSTUNREACH when PF dropped the packet, ENOBUFS when PF consumed
 * the mbuf outright.
 */
int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	/*
	 * Outbound, locally-originated packets may carry a request for
	 * delayed TCP/UDP checksum computation.  If the outgoing
	 * interface cannot offload that work, compute the checksum now
	 * so that PF sees (and can adjust) a complete packet.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) ==
	    PF_PASS) {
		return 0;
	}
	if (*mp == NULL) {
		/* PF already consumed the mbuf */
		return ENOBUFS;
	}
	/* PF said drop: free the packet ourselves */
	m_freem(*mp);
	*mp = NULL;
	return EHOSTUNREACH;
}
#endif /* INET6 */
4684
4685 int
4686 pf_ifaddr_hook(struct ifnet *ifp)
4687 {
4688 struct pfi_kif *kif = ifp->if_pf_kif;
4689
4690 if (kif != NULL) {
4691 lck_rw_lock_shared(pf_perim_lock);
4692 lck_mtx_lock(pf_lock);
4693
4694 pfi_kifaddr_update(kif);
4695
4696 lck_mtx_unlock(pf_lock);
4697 lck_rw_done(pf_perim_lock);
4698 }
4699 return 0;
4700 }
4701
4702 /*
4703 * Caller acquires dlil lock as writer (exclusive)
4704 */
4705 void
4706 pf_ifnet_hook(struct ifnet *ifp, int attach)
4707 {
4708 lck_rw_lock_shared(pf_perim_lock);
4709 lck_mtx_lock(pf_lock);
4710 if (attach) {
4711 pfi_attach_ifnet(ifp);
4712 } else {
4713 pfi_detach_ifnet(ifp);
4714 }
4715 lck_mtx_unlock(pf_lock);
4716 lck_rw_done(pf_perim_lock);
4717 }
4718
4719 static void
4720 pf_attach_hooks(void)
4721 {
4722 ifnet_head_lock_shared();
4723 /*
4724 * Check against ifnet_addrs[] before proceeding, in case this
4725 * is called very early on, e.g. during dlil_init() before any
4726 * network interface is attached.
4727 */
4728 if (ifnet_addrs != NULL) {
4729 int i;
4730
4731 for (i = 0; i <= if_index; i++) {
4732 struct ifnet *ifp = ifindex2ifnet[i];
4733 if (ifp != NULL) {
4734 pfi_attach_ifnet(ifp);
4735 }
4736 }
4737 }
4738 ifnet_head_done();
4739 }
4740
4741 #if 0
4742 /* currently unused along with pfdetach() */
4743 static void
4744 pf_detach_hooks(void)
4745 {
4746 ifnet_head_lock_shared();
4747 if (ifnet_addrs != NULL) {
4748 for (i = 0; i <= if_index; i++) {
4749 int i;
4750
4751 struct ifnet *ifp = ifindex2ifnet[i];
4752 if (ifp != NULL && ifp->if_pf_kif != NULL) {
4753 pfi_detach_ifnet(ifp);
4754 }
4755 }
4756 }
4757 ifnet_head_done();
4758 }
4759 #endif
4760
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime; it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique.  This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failure at compile time indicates duplicated ioctl values.
 */
/*
 * Compile-time uniqueness check for the 'D' ioctl group: duplicate
 * DIOC* values produce a duplicate-case compile error here.  Never
 * called; the attribute suppresses the unused-function warning.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}