/*
 * Source: apple/xnu (xnu-7195.81.3), bsd/net/pf_ioctl.c
 */
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84
85 #include <mach/vm_param.h>
86
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_var.h>
95 #include <netinet/in_systm.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/ip_icmp.h>
99 #include <netinet/if_ether.h>
100
101 #if DUMMYNET
102 #include <netinet/ip_dummynet.h>
103 #else
104 struct ip_fw_args;
105 #endif /* DUMMYNET */
106
107 #include <libkern/crypto/md5.h>
108
109 #include <machine/machine_routines.h>
110
111 #include <miscfs/devfs/devfs.h>
112
113 #include <net/pfvar.h>
114
115 #if NPFSYNC
116 #include <net/if_pfsync.h>
117 #endif /* NPFSYNC */
118
119 #if PFLOG
120 #include <net/if_pflog.h>
121 #endif /* PFLOG */
122
123 #include <netinet/ip6.h>
124 #include <netinet/in_pcb.h>
125
126 #include <dev/random/randomdev.h>
127
128 #if 0
129 static void pfdetach(void);
130 #endif
131 static int pfopen(dev_t, int, int, struct proc *);
132 static int pfclose(dev_t, int, int, struct proc *);
133 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
134 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
135 struct pfioc_table_64 *, struct proc *);
136 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
137 struct pfioc_tokens_64 *, struct proc *);
138 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
139 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
140 struct proc *);
141 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
142 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
143 struct pfioc_states_64 *, struct proc *);
144 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
145 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
146 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
147 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
148 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
149 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
150 struct pfioc_trans_64 *, struct proc *);
151 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
152 struct pfioc_src_nodes_64 *, struct proc *);
153 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
154 struct proc *);
155 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
156 struct pfioc_iface_64 *, struct proc *);
157 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
158 u_int8_t, u_int8_t, u_int8_t);
159 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
160 static void pf_empty_pool(struct pf_palist *);
161 static int pf_begin_rules(u_int32_t *, int, const char *);
162 static int pf_rollback_rules(u_int32_t, int, char *);
163 static int pf_setup_pfsync_matching(struct pf_ruleset *);
164 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
165 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
166 static int pf_commit_rules(u_int32_t, int, char *);
167 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
168 int);
169 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
170 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
171 struct pf_state *);
172 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
173 struct pf_state *);
174 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
175 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
176 static void pf_expire_states_and_src_nodes(struct pf_rule *);
177 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
178 int, struct pf_rule *);
179 static void pf_addrwrap_setup(struct pf_addr_wrap *);
180 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
181 struct pf_ruleset *);
182 static void pf_delete_rule_by_owner(char *, u_int32_t);
183 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
184 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
185 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
186 int, struct pf_rule **);
187
188 #define PF_CDEV_MAJOR (-1)
189
190 static const struct cdevsw pf_cdevsw = {
191 .d_open = pfopen,
192 .d_close = pfclose,
193 .d_read = eno_rdwrt,
194 .d_write = eno_rdwrt,
195 .d_ioctl = pfioctl,
196 .d_stop = eno_stop,
197 .d_reset = eno_reset,
198 .d_ttys = NULL,
199 .d_select = eno_select,
200 .d_mmap = eno_mmap,
201 .d_strategy = eno_strat,
202 .d_reserved_1 = eno_getc,
203 .d_reserved_2 = eno_putc,
204 .d_type = 0
205 };
206
207 static void pf_attach_hooks(void);
208 #if 0
209 /* currently unused along with pfdetach() */
210 static void pf_detach_hooks(void);
211 #endif
212
213 /*
214 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
215 * and used in pf_af_hook() for performance optimization, such that packets
216 * will enter pf_test() or pf_test6() only when PF is running.
217 */
218 int pf_is_enabled = 0;
219
220 u_int32_t pf_hash_seed;
221 int16_t pf_nat64_configured = 0;
222
223 /*
224 * These are the pf enabled reference counting variables
225 */
226 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
227
228 static u_int64_t pf_enabled_ref_count;
229 static u_int32_t nr_tokens = 0;
230 static u_int32_t pffwrules;
231 static u_int32_t pfdevcnt;
232
233 SLIST_HEAD(list_head, pfioc_kernel_token);
234 static struct list_head token_list_head;
235
236 struct pf_rule pf_default_rule;
237
238 typedef struct {
239 char tag_name[PF_TAG_NAME_SIZE];
240 uint16_t tag_id;
241 } pf_reserved_tag_table_t;
242
243 #define NUM_RESERVED_TAGS 2
244 static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
245 { PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
246 { PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
247 };
248 #define RESERVED_TAG_ID_MIN PF_TAG_ID_SYSTEM_SERVICE
249
250 #define DYNAMIC_TAG_ID_MAX 50000
251 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
252 TAILQ_HEAD_INITIALIZER(pf_tags);
253
254 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
255 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
256 #endif
257 static u_int16_t tagname2tag(struct pf_tags *, char *);
258 static void tag2tagname(struct pf_tags *, u_int16_t, char *);
259 static void tag_unref(struct pf_tags *, u_int16_t);
260 static int pf_rtlabel_add(struct pf_addr_wrap *);
261 static void pf_rtlabel_remove(struct pf_addr_wrap *);
262 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
263
264 #if INET
265 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
266 struct ip_fw_args *);
267 #endif /* INET */
268 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
269 struct ip_fw_args *);
270
271 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
272
273 /*
274 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
275 */
276 #define PFIOCX_STRUCT_DECL(s) \
277 struct { \
278 union { \
279 struct s##_32 _s##_32; \
280 struct s##_64 _s##_64; \
281 } _u; \
282 } *s##_un = NULL \
283
284 #define PFIOCX_STRUCT_BEGIN(a, s, _action) { \
285 VERIFY(s##_un == NULL); \
286 s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \
287 if (s##_un == NULL) { \
288 _action \
289 } else { \
290 if (p64) \
291 bcopy(a, &s##_un->_u._s##_64, \
292 sizeof (struct s##_64)); \
293 else \
294 bcopy(a, &s##_un->_u._s##_32, \
295 sizeof (struct s##_32)); \
296 } \
297 }
298
299 #define PFIOCX_STRUCT_END(s, a) { \
300 VERIFY(s##_un != NULL); \
301 if (p64) \
302 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
303 else \
304 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
305 _FREE(s##_un, M_TEMP); \
306 s##_un = NULL; \
307 }
308
309 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
310 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
311
312 /*
313 * Helper macros for regular ioctl structures.
314 */
315 #define PFIOC_STRUCT_BEGIN(a, v, _action) { \
316 VERIFY((v) == NULL); \
317 (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \
318 if ((v) == NULL) { \
319 _action \
320 } else { \
321 bcopy(a, v, sizeof (*(v))); \
322 } \
323 }
324
325 #define PFIOC_STRUCT_END(v, a) { \
326 VERIFY((v) != NULL); \
327 bcopy(v, a, sizeof (*(v))); \
328 _FREE(v, M_TEMP); \
329 (v) = NULL; \
330 }
331
332 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
333 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
334
335 static lck_attr_t *pf_perim_lock_attr;
336 static lck_grp_t *pf_perim_lock_grp;
337 static lck_grp_attr_t *pf_perim_lock_grp_attr;
338
339 static lck_attr_t *pf_lock_attr;
340 static lck_grp_t *pf_lock_grp;
341 static lck_grp_attr_t *pf_lock_grp_attr;
342
343 struct thread *pf_purge_thread;
344
345 extern void pfi_kifaddr_update(void *);
346
347 /* pf enable ref-counting helper functions */
348 static u_int64_t generate_token(struct proc *);
349 static int remove_token(struct pfioc_remove_token *);
350 static void invalidate_all_tokens(void);
351
352 static u_int64_t
353 generate_token(struct proc *p)
354 {
355 u_int64_t token_value;
356 struct pfioc_kernel_token *new_token;
357
358 if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
359 os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
360 return 0;
361 }
362
363 new_token = _MALLOC(sizeof(struct pfioc_kernel_token), M_TEMP,
364 M_WAITOK | M_ZERO);
365
366 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
367
368 if (new_token == NULL) {
369 /* malloc failed! bail! */
370 os_log_error(OS_LOG_DEFAULT, "%s: unable to allocate pf token structure!", __func__);
371 return 0;
372 }
373
374 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
375
376 new_token->token.token_value = token_value;
377 new_token->token.pid = proc_pid(p);
378 proc_name(new_token->token.pid, new_token->token.proc_name,
379 sizeof(new_token->token.proc_name));
380 new_token->token.timestamp = pf_calendar_time_second();
381
382 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
383 nr_tokens++;
384
385 return token_value;
386 }
387
388 static int
389 remove_token(struct pfioc_remove_token *tok)
390 {
391 struct pfioc_kernel_token *entry, *tmp;
392
393 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
394
395 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
396 if (tok->token_value == entry->token.token_value) {
397 SLIST_REMOVE(&token_list_head, entry,
398 pfioc_kernel_token, next);
399 _FREE(entry, M_TEMP);
400 nr_tokens--;
401 return 0; /* success */
402 }
403 }
404
405 printf("pf : remove failure\n");
406 return ESRCH; /* failure */
407 }
408
409 static void
410 invalidate_all_tokens(void)
411 {
412 struct pfioc_kernel_token *entry, *tmp;
413
414 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
415
416 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
417 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
418 _FREE(entry, M_TEMP);
419 }
420
421 nr_tokens = 0;
422 }
423
/*
 * One-time PF subsystem initialization, called at boot.
 * Sets up locks, memory pools, rulesets, the default rule and its
 * timeouts, the purge thread, and the /dev/pf and /dev/pfm nodes.
 * NOTE(review): ordering here is load-bearing -- pools and rulesets
 * must exist before the purge thread and device nodes are created.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* perimeter rw-lock: serializes DIOCSTART/DIOCSTOP vs. packet path */
	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

	/* main pf mutex: protects PF state, tokens, rulesets */
	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

	/* fixed-size allocation pools for the core PF objects */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry high-water mark on small-memory systems */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* compile-time checks: service-class codes must map onto SCIDX_* */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	/* background thread that expires states/src-nodes/fragments */
	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: the regular ioctl interface */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	/* /dev/pfm: exclusive-open management device (see pfopen) */
	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
547
#if 0
/*
 * Full PF teardown -- currently compiled out (no caller detaches PF).
 * Mirrors pfinit() in reverse: stop the packet hooks, flush rules,
 * states, source nodes, tables and anchors, then destroy the pools
 * and subsystems.  Kept for reference.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		/* begin+commit with an empty inactive set == flush */
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark everything purgeable, then purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: detach from states first so refs drop */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
625
626 static int
627 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
628 {
629 #pragma unused(flags, fmt, p)
630 if (minor(dev) >= PFDEV_MAX) {
631 return ENXIO;
632 }
633
634 if (minor(dev) == PFDEV_PFM) {
635 lck_mtx_lock(pf_lock);
636 if (pfdevcnt != 0) {
637 lck_mtx_unlock(pf_lock);
638 return EBUSY;
639 }
640 pfdevcnt++;
641 lck_mtx_unlock(pf_lock);
642 }
643 return 0;
644 }
645
646 static int
647 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
648 {
649 #pragma unused(flags, fmt, p)
650 if (minor(dev) >= PFDEV_MAX) {
651 return ENXIO;
652 }
653
654 if (minor(dev) == PFDEV_PFM) {
655 lck_mtx_lock(pf_lock);
656 VERIFY(pfdevcnt > 0);
657 pfdevcnt--;
658 lck_mtx_unlock(pf_lock);
659 }
660 return 0;
661 }
662
663 static struct pf_pool *
664 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
665 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
666 u_int8_t check_ticket)
667 {
668 struct pf_ruleset *ruleset;
669 struct pf_rule *rule;
670 int rs_num;
671
672 ruleset = pf_find_ruleset(anchor);
673 if (ruleset == NULL) {
674 return NULL;
675 }
676 rs_num = pf_get_ruleset_number(rule_action);
677 if (rs_num >= PF_RULESET_MAX) {
678 return NULL;
679 }
680 if (active) {
681 if (check_ticket && ticket !=
682 ruleset->rules[rs_num].active.ticket) {
683 return NULL;
684 }
685 if (r_last) {
686 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
687 pf_rulequeue);
688 } else {
689 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
690 }
691 } else {
692 if (check_ticket && ticket !=
693 ruleset->rules[rs_num].inactive.ticket) {
694 return NULL;
695 }
696 if (r_last) {
697 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
698 pf_rulequeue);
699 } else {
700 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
701 }
702 }
703 if (!r_last) {
704 while ((rule != NULL) && (rule->nr != rule_number)) {
705 rule = TAILQ_NEXT(rule, entries);
706 }
707 }
708 if (rule == NULL) {
709 return NULL;
710 }
711
712 return &rule->rpool;
713 }
714
715 static void
716 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
717 {
718 struct pf_pooladdr *mv_pool_pa;
719
720 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
721 TAILQ_REMOVE(poola, mv_pool_pa, entries);
722 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
723 }
724 }
725
726 static void
727 pf_empty_pool(struct pf_palist *poola)
728 {
729 struct pf_pooladdr *empty_pool_pa;
730
731 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
732 pfi_dynaddr_remove(&empty_pool_pa->addr);
733 pf_tbladdr_remove(&empty_pool_pa->addr);
734 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
735 TAILQ_REMOVE(poola, empty_pool_pa, entries);
736 pool_put(&pf_pooladdr_pl, empty_pool_pa);
737 }
738 }
739
/*
 * Unlink a rule from rulequeue (if non-NULL) and free it once nothing
 * references it anymore.  A rule with live states or source nodes is
 * only unlinked here; the final free happens later when the last state
 * goes away (rulequeue == NULL path with tqe_prev already cleared).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		/* unlink; tqe_prev == NULL marks the rule as detached */
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer the free while states/src-nodes still point at the rule,
	 * or while it is still linked into some queue */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* deferred-free path: tables were not detached above */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
783
/*
 * Map a tag name to a numeric tag id, allocating one if needed.
 * Existing tags just gain a reference.  Reserved names get their fixed
 * ids (>= RESERVED_TAG_ID_MIN, kept at the head of the list); all other
 * names receive the lowest free id <= DYNAMIC_TAG_ID_MAX, found by a
 * linear scan of the sorted list.  Returns 0 on allocation failure or
 * id-space exhaustion.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	uint16_t new_tagid = 1;
	bool reserved_tag = false;

	/* existing tag: bump the refcount and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* walk the densely-packed ids until the first gap */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	/* dynamic id space exhausted */
	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO);
	if (tag == NULL) {
		return 0;
	}
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	/* keep the list sorted: reserved first, then ascending dynamic ids */
	if (reserved_tag) {     /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {                /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
855
856 static void
857 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
858 {
859 struct pf_tagname *tag;
860
861 TAILQ_FOREACH(tag, head, entries)
862 if (tag->tag == tagid) {
863 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
864 return;
865 }
866 }
867
868 static void
869 tag_unref(struct pf_tags *head, u_int16_t tag)
870 {
871 struct pf_tagname *p, *next;
872
873 if (tag == 0) {
874 return;
875 }
876
877 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
878 next = TAILQ_NEXT(p, entries);
879 if (tag == p->tag) {
880 if (--p->ref == 0) {
881 TAILQ_REMOVE(head, p, entries);
882 _FREE(p, M_TEMP);
883 }
884 break;
885 }
886 }
887 }
888
/* Public wrapper: allocate/reference a tag id in the global pf_tags list. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
894
/* Public wrapper: resolve a tag id to its name from the global list. */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
900
901 void
902 pf_tag_ref(u_int16_t tag)
903 {
904 struct pf_tagname *t;
905
906 TAILQ_FOREACH(t, &pf_tags, entries)
907 if (t->tag == tag) {
908 break;
909 }
910 if (t != NULL) {
911 t->ref++;
912 }
913 }
914
/* Public wrapper: drop a reference on a tag in the global pf_tags list. */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
920
/* Route labels are not supported on this platform; stub returns success. */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
927
/* Route labels are not supported on this platform; stub, no-op. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
933
/* Route labels are not supported on this platform; stub, no-op. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
939
940 static int
941 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
942 {
943 struct pf_ruleset *rs;
944 struct pf_rule *rule;
945
946 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
947 return EINVAL;
948 }
949 rs = pf_find_or_create_ruleset(anchor);
950 if (rs == NULL) {
951 return EINVAL;
952 }
953 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
954 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
955 rs->rules[rs_num].inactive.rcount--;
956 }
957 *ticket = ++rs->rules[rs_num].inactive.ticket;
958 rs->rules[rs_num].inactive.open = 1;
959 return 0;
960 }
961
962 static int
963 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
964 {
965 struct pf_ruleset *rs;
966 struct pf_rule *rule;
967
968 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
969 return EINVAL;
970 }
971 rs = pf_find_ruleset(anchor);
972 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
973 rs->rules[rs_num].inactive.ticket != ticket) {
974 return 0;
975 }
976 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
977 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
978 rs->rules[rs_num].inactive.rcount--;
979 }
980 rs->rules[rs_num].inactive.open = 0;
981 return 0;
982 }
983
984 #define PF_MD5_UPD(st, elm) \
985 MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))
986
987 #define PF_MD5_UPD_STR(st, elm) \
988 MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))
989
990 #define PF_MD5_UPD_HTONL(st, elm, stor) do { \
991 (stor) = htonl((st)->elm); \
992 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
993 } while (0)
994
995 #define PF_MD5_UPD_HTONS(st, elm, stor) do { \
996 (stor) = htons((st)->elm); \
997 MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
998 } while (0)
999
/*
 * Fold a rule address (src or dst side) into the ruleset MD5 checksum.
 * NOTE: the field order here defines the checksum format shared with
 * pfsync peers -- do not reorder or add/remove fields.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only matter for protocols that have them */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1036
/*
 * Fold one rule into the ruleset MD5 checksum.  Multi-byte integers
 * are hashed in network byte order (via the HTONL/HTONS temporaries)
 * so the checksum is comparable across hosts.  Field order is part of
 * the checksum format -- do not reorder.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for 16-bit network-order values */
	u_int32_t y;    /* scratch for 32-bit network-order values */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1075
/*
 * Commit a ruleset transaction: swap the inactive ruleset (built up by
 * the caller under "ticket") into the active slot "rs_num" of anchor
 * "anchor", then destroy what was previously active.
 *
 * Must be called with pf_lock held.  Returns 0 on success, EINVAL for
 * a bad ruleset number, EBUSY if the transaction is not open or the
 * ticket is stale, or the error from pf_setup_pfsync_matching().
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	/* transaction must still be open and the caller's ticket current */
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* outgoing PFM-owned rules leave the global pffwrules count */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	/* the old queue/array now sit in the inactive slot for teardown */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	/* recompute skip-step optimization data for the new active rules */
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	if (rs->rules[rs_num].inactive.ptr_array) {
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	}
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	/* close the transaction */
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1148
1149 static void
1150 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1151 int minordev)
1152 {
1153 bcopy(src, dst, sizeof(struct pf_rule));
1154
1155 dst->label[sizeof(dst->label) - 1] = '\0';
1156 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1157 dst->qname[sizeof(dst->qname) - 1] = '\0';
1158 dst->pqname[sizeof(dst->pqname) - 1] = '\0';
1159 dst->tagname[sizeof(dst->tagname) - 1] = '\0';
1160 dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
1161 dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
1162 dst->owner[sizeof(dst->owner) - 1] = '\0';
1163
1164 dst->cuid = kauth_cred_getuid(p->p_ucred);
1165 dst->cpid = p->p_pid;
1166
1167 dst->anchor = NULL;
1168 dst->kif = NULL;
1169 dst->overload_tbl = NULL;
1170
1171 TAILQ_INIT(&dst->rpool.list);
1172 dst->rpool.cur = NULL;
1173
1174 /* initialize refcounting */
1175 dst->states = 0;
1176 dst->src_nodes = 0;
1177
1178 dst->entries.tqe_prev = NULL;
1179 dst->entries.tqe_next = NULL;
1180 if ((uint8_t)minordev == PFDEV_PFM) {
1181 dst->rule_flag |= PFRULE_PFM;
1182 }
1183 }
1184
1185 static void
1186 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1187 {
1188 bcopy(src, dst, sizeof(struct pf_rule));
1189
1190 dst->anchor = NULL;
1191 dst->kif = NULL;
1192 dst->overload_tbl = NULL;
1193
1194 dst->rpool.list.tqh_first = NULL;
1195 dst->rpool.list.tqh_last = NULL;
1196 dst->rpool.cur = NULL;
1197
1198 dst->entries.tqe_prev = NULL;
1199 dst->entries.tqe_next = NULL;
1200 }
1201
/*
 * Serialize a kernel pf state (and its state key) into the wire/export
 * representation used by pfsync and the DIOCGETSTATE(S) ioctls.
 * Absolute times are converted to relative ones: "creation" becomes
 * age in seconds, "expire" becomes seconds remaining (0 if already
 * expired).
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* rules are exported by number; -1 marks "no rule" */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;	/* age in seconds */
	sp->expire = pf_state_expires(s);	/* absolute; made relative below */
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* convert absolute expiry to seconds remaining (0 = expired) */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1262
/*
 * Deserialize an imported pfsync/ioctl state back into a kernel state
 * and state key: the inverse of pf_state_export().  Rule pointers are
 * not preserved across export; the state is attached to the default
 * rule, counters are zeroed, and the flowhash is recomputed locally.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* flowhash is host-local; never trust the imported value */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/*
	 * Back-date expiry by the time the state had already consumed
	 * (full timeout minus seconds remaining at export).
	 * NOTE(review): sp->timeout indexes pf_default_rule.timeout[];
	 * the caller is expected to have validated it < PFTM_MAX — confirm.
	 */
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1303
1304 static void
1305 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1306 {
1307 bcopy(src, dst, sizeof(struct pf_pooladdr));
1308
1309 dst->entries.tqe_prev = NULL;
1310 dst->entries.tqe_next = NULL;
1311 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1312 dst->kif = NULL;
1313 }
1314
1315 static void
1316 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1317 {
1318 bcopy(src, dst, sizeof(struct pf_pooladdr));
1319
1320 dst->entries.tqe_prev = NULL;
1321 dst->entries.tqe_next = NULL;
1322 dst->kif = NULL;
1323 }
1324
/*
 * Compute the MD5 checksum of all non-scrub inactive rulesets (stored
 * in pf_status.pf_chksum for pfsync matching) and, as a side effect,
 * rebuild each ruleset's inactive ptr_array so rules can be looked up
 * by rule number.  Returns 0 or ENOMEM.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* drop any stale array before rebuilding */
		if (rs->rules[rs_cnt].inactive.ptr_array) {
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		}
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			/*
			 * NOTE(review): sizeof(caddr_t) * rcount is not
			 * checked for multiplication overflow — confirm
			 * rcount is bounded by the ioctl layer.
			 */
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array) {
				return ENOMEM;
			}
		}

		/*
		 * Hash every rule and index it by number.
		 * NOTE(review): assumes rule->nr < rcount for every rule
		 * on the inactive queue — confirm this invariant holds.
		 */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1367
/*
 * Enable pf.  Must be called with pf_lock held and pf currently
 * disabled.  Records the enable time, lazily seeds the 64-bit state-id
 * counter (upper 32 bits from the clock), and wakes the purge thread.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed once: time in the upper 32 bits, low bits count up */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1385
/*
 * Disable pf.  Must be called with pf_lock held and pf currently
 * enabled.  Records the disable time and wakes the purge thread so it
 * can notice the state change.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1399
1400 static int
1401 pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1402 {
1403 #pragma unused(dev)
1404 int p64 = proc_is64bit(p);
1405 int error = 0;
1406 int minordev = minor(dev);
1407
1408 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
1409 return EPERM;
1410 }
1411
1412 /* XXX keep in sync with switch() below */
1413 if (securelevel > 1) {
1414 switch (cmd) {
1415 case DIOCGETRULES:
1416 case DIOCGETRULE:
1417 case DIOCGETADDRS:
1418 case DIOCGETADDR:
1419 case DIOCGETSTATE:
1420 case DIOCSETSTATUSIF:
1421 case DIOCGETSTATUS:
1422 case DIOCCLRSTATUS:
1423 case DIOCNATLOOK:
1424 case DIOCSETDEBUG:
1425 case DIOCGETSTATES:
1426 case DIOCINSERTRULE:
1427 case DIOCDELETERULE:
1428 case DIOCGETTIMEOUT:
1429 case DIOCCLRRULECTRS:
1430 case DIOCGETLIMIT:
1431 case DIOCGETALTQS:
1432 case DIOCGETALTQ:
1433 case DIOCGETQSTATS:
1434 case DIOCGETRULESETS:
1435 case DIOCGETRULESET:
1436 case DIOCRGETTABLES:
1437 case DIOCRGETTSTATS:
1438 case DIOCRCLRTSTATS:
1439 case DIOCRCLRADDRS:
1440 case DIOCRADDADDRS:
1441 case DIOCRDELADDRS:
1442 case DIOCRSETADDRS:
1443 case DIOCRGETADDRS:
1444 case DIOCRGETASTATS:
1445 case DIOCRCLRASTATS:
1446 case DIOCRTSTADDRS:
1447 case DIOCOSFPGET:
1448 case DIOCGETSRCNODES:
1449 case DIOCCLRSRCNODES:
1450 case DIOCIGETIFACES:
1451 case DIOCGIFSPEED:
1452 case DIOCSETIFFLAG:
1453 case DIOCCLRIFFLAG:
1454 break;
1455 case DIOCRCLRTABLES:
1456 case DIOCRADDTABLES:
1457 case DIOCRDELTABLES:
1458 case DIOCRSETTFLAGS: {
1459 int pfrio_flags;
1460
1461 bcopy(&((struct pfioc_table *)(void *)addr)->
1462 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1463
1464 if (pfrio_flags & PFR_FLAG_DUMMY) {
1465 break; /* dummy operation ok */
1466 }
1467 return EPERM;
1468 }
1469 default:
1470 return EPERM;
1471 }
1472 }
1473
1474 if (!(flags & FWRITE)) {
1475 switch (cmd) {
1476 case DIOCSTART:
1477 case DIOCSTARTREF:
1478 case DIOCSTOP:
1479 case DIOCSTOPREF:
1480 case DIOCGETSTARTERS:
1481 case DIOCGETRULES:
1482 case DIOCGETADDRS:
1483 case DIOCGETADDR:
1484 case DIOCGETSTATE:
1485 case DIOCGETSTATUS:
1486 case DIOCGETSTATES:
1487 case DIOCINSERTRULE:
1488 case DIOCDELETERULE:
1489 case DIOCGETTIMEOUT:
1490 case DIOCGETLIMIT:
1491 case DIOCGETALTQS:
1492 case DIOCGETALTQ:
1493 case DIOCGETQSTATS:
1494 case DIOCGETRULESETS:
1495 case DIOCGETRULESET:
1496 case DIOCNATLOOK:
1497 case DIOCRGETTABLES:
1498 case DIOCRGETTSTATS:
1499 case DIOCRGETADDRS:
1500 case DIOCRGETASTATS:
1501 case DIOCRTSTADDRS:
1502 case DIOCOSFPGET:
1503 case DIOCGETSRCNODES:
1504 case DIOCIGETIFACES:
1505 case DIOCGIFSPEED:
1506 break;
1507 case DIOCRCLRTABLES:
1508 case DIOCRADDTABLES:
1509 case DIOCRDELTABLES:
1510 case DIOCRCLRTSTATS:
1511 case DIOCRCLRADDRS:
1512 case DIOCRADDADDRS:
1513 case DIOCRDELADDRS:
1514 case DIOCRSETADDRS:
1515 case DIOCRSETTFLAGS: {
1516 int pfrio_flags;
1517
1518 bcopy(&((struct pfioc_table *)(void *)addr)->
1519 pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));
1520
1521 if (pfrio_flags & PFR_FLAG_DUMMY) {
1522 flags |= FWRITE; /* need write lock for dummy */
1523 break; /* dummy operation ok */
1524 }
1525 return EACCES;
1526 }
1527 case DIOCGETRULE: {
1528 u_int32_t action;
1529
1530 bcopy(&((struct pfioc_rule *)(void *)addr)->action,
1531 &action, sizeof(action));
1532
1533 if (action == PF_GET_CLR_CNTR) {
1534 return EACCES;
1535 }
1536 break;
1537 }
1538 default:
1539 return EACCES;
1540 }
1541 }
1542
1543 if (flags & FWRITE) {
1544 lck_rw_lock_exclusive(pf_perim_lock);
1545 } else {
1546 lck_rw_lock_shared(pf_perim_lock);
1547 }
1548
1549 lck_mtx_lock(pf_lock);
1550
1551 switch (cmd) {
1552 case DIOCSTART:
1553 if (pf_status.running) {
1554 /*
1555 * Increment the reference for a simple -e enable, so
1556 * that even if other processes drop their references,
1557 * pf will still be available to processes that turned
1558 * it on without taking a reference
1559 */
1560 if (nr_tokens == pf_enabled_ref_count) {
1561 pf_enabled_ref_count++;
1562 VERIFY(pf_enabled_ref_count != 0);
1563 }
1564 error = EEXIST;
1565 } else if (pf_purge_thread == NULL) {
1566 error = ENOMEM;
1567 } else {
1568 pf_start();
1569 pf_enabled_ref_count++;
1570 VERIFY(pf_enabled_ref_count != 0);
1571 }
1572 break;
1573
1574 case DIOCSTARTREF: /* u_int64_t */
1575 if (pf_purge_thread == NULL) {
1576 error = ENOMEM;
1577 } else {
1578 u_int64_t token;
1579
1580 /* small enough to be on stack */
1581 if ((token = generate_token(p)) != 0) {
1582 if (pf_is_enabled == 0) {
1583 pf_start();
1584 }
1585 pf_enabled_ref_count++;
1586 VERIFY(pf_enabled_ref_count != 0);
1587 } else {
1588 error = ENOMEM;
1589 DPFPRINTF(PF_DEBUG_URGENT,
1590 ("pf: unable to generate token\n"));
1591 }
1592 bcopy(&token, addr, sizeof(token));
1593 }
1594 break;
1595
1596 case DIOCSTOP:
1597 if (!pf_status.running) {
1598 error = ENOENT;
1599 } else {
1600 pf_stop();
1601 pf_enabled_ref_count = 0;
1602 invalidate_all_tokens();
1603 }
1604 break;
1605
1606 case DIOCSTOPREF: /* struct pfioc_remove_token */
1607 if (!pf_status.running) {
1608 error = ENOENT;
1609 } else {
1610 struct pfioc_remove_token pfrt;
1611
1612 /* small enough to be on stack */
1613 bcopy(addr, &pfrt, sizeof(pfrt));
1614 if ((error = remove_token(&pfrt)) == 0) {
1615 VERIFY(pf_enabled_ref_count != 0);
1616 pf_enabled_ref_count--;
1617 /* return currently held references */
1618 pfrt.refcount = pf_enabled_ref_count;
1619 DPFPRINTF(PF_DEBUG_MISC,
1620 ("pf: enabled refcount decremented\n"));
1621 } else {
1622 error = EINVAL;
1623 DPFPRINTF(PF_DEBUG_URGENT,
1624 ("pf: token mismatch\n"));
1625 }
1626 bcopy(&pfrt, addr, sizeof(pfrt));
1627
1628 if (error == 0 && pf_enabled_ref_count == 0) {
1629 pf_stop();
1630 }
1631 }
1632 break;
1633
1634 case DIOCGETSTARTERS: { /* struct pfioc_tokens */
1635 PFIOCX_STRUCT_DECL(pfioc_tokens);
1636
1637 PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
1638 error = pfioctl_ioc_tokens(cmd,
1639 PFIOCX_STRUCT_ADDR32(pfioc_tokens),
1640 PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
1641 PFIOCX_STRUCT_END(pfioc_tokens, addr);
1642 break;
1643 }
1644
1645 case DIOCADDRULE: /* struct pfioc_rule */
1646 case DIOCGETRULES: /* struct pfioc_rule */
1647 case DIOCGETRULE: /* struct pfioc_rule */
1648 case DIOCCHANGERULE: /* struct pfioc_rule */
1649 case DIOCINSERTRULE: /* struct pfioc_rule */
1650 case DIOCDELETERULE: { /* struct pfioc_rule */
1651 struct pfioc_rule *pr = NULL;
1652
1653 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1654 error = pfioctl_ioc_rule(cmd, minordev, pr, p);
1655 PFIOC_STRUCT_END(pr, addr);
1656 break;
1657 }
1658
1659 case DIOCCLRSTATES: /* struct pfioc_state_kill */
1660 case DIOCKILLSTATES: { /* struct pfioc_state_kill */
1661 struct pfioc_state_kill *psk = NULL;
1662
1663 PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; );
1664 error = pfioctl_ioc_state_kill(cmd, psk, p);
1665 PFIOC_STRUCT_END(psk, addr);
1666 break;
1667 }
1668
1669 case DIOCADDSTATE: /* struct pfioc_state */
1670 case DIOCGETSTATE: { /* struct pfioc_state */
1671 struct pfioc_state *ps = NULL;
1672
1673 PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; );
1674 error = pfioctl_ioc_state(cmd, ps, p);
1675 PFIOC_STRUCT_END(ps, addr);
1676 break;
1677 }
1678
1679 case DIOCGETSTATES: { /* struct pfioc_states */
1680 PFIOCX_STRUCT_DECL(pfioc_states);
1681
1682 PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; );
1683 error = pfioctl_ioc_states(cmd,
1684 PFIOCX_STRUCT_ADDR32(pfioc_states),
1685 PFIOCX_STRUCT_ADDR64(pfioc_states), p);
1686 PFIOCX_STRUCT_END(pfioc_states, addr);
1687 break;
1688 }
1689
1690 case DIOCGETSTATUS: { /* struct pf_status */
1691 struct pf_status *s = NULL;
1692
1693 PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; );
1694 pfi_update_status(s->ifname, s);
1695 PFIOC_STRUCT_END(s, addr);
1696 break;
1697 }
1698
1699 case DIOCSETSTATUSIF: { /* struct pfioc_if */
1700 struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;
1701
1702 /* OK for unaligned accesses */
1703 if (pi->ifname[0] == 0) {
1704 bzero(pf_status.ifname, IFNAMSIZ);
1705 break;
1706 }
1707 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
1708 break;
1709 }
1710
1711 case DIOCCLRSTATUS: {
1712 bzero(pf_status.counters, sizeof(pf_status.counters));
1713 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
1714 bzero(pf_status.scounters, sizeof(pf_status.scounters));
1715 pf_status.since = pf_calendar_time_second();
1716 if (*pf_status.ifname) {
1717 pfi_update_status(pf_status.ifname, NULL);
1718 }
1719 break;
1720 }
1721
1722 case DIOCNATLOOK: { /* struct pfioc_natlook */
1723 struct pfioc_natlook *pnl = NULL;
1724
1725 PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; );
1726 error = pfioctl_ioc_natlook(cmd, pnl, p);
1727 PFIOC_STRUCT_END(pnl, addr);
1728 break;
1729 }
1730
1731 case DIOCSETTIMEOUT: /* struct pfioc_tm */
1732 case DIOCGETTIMEOUT: { /* struct pfioc_tm */
1733 struct pfioc_tm pt;
1734
1735 /* small enough to be on stack */
1736 bcopy(addr, &pt, sizeof(pt));
1737 error = pfioctl_ioc_tm(cmd, &pt, p);
1738 bcopy(&pt, addr, sizeof(pt));
1739 break;
1740 }
1741
1742 case DIOCGETLIMIT: /* struct pfioc_limit */
1743 case DIOCSETLIMIT: { /* struct pfioc_limit */
1744 struct pfioc_limit pl;
1745
1746 /* small enough to be on stack */
1747 bcopy(addr, &pl, sizeof(pl));
1748 error = pfioctl_ioc_limit(cmd, &pl, p);
1749 bcopy(&pl, addr, sizeof(pl));
1750 break;
1751 }
1752
1753 case DIOCSETDEBUG: { /* u_int32_t */
1754 bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
1755 break;
1756 }
1757
1758 case DIOCCLRRULECTRS: {
1759 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
1760 struct pf_ruleset *ruleset = &pf_main_ruleset;
1761 struct pf_rule *rule;
1762
1763 TAILQ_FOREACH(rule,
1764 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
1765 rule->evaluations = 0;
1766 rule->packets[0] = rule->packets[1] = 0;
1767 rule->bytes[0] = rule->bytes[1] = 0;
1768 }
1769 break;
1770 }
1771
1772 case DIOCGIFSPEED: {
1773 struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
1774 struct pf_ifspeed ps;
1775 struct ifnet *ifp;
1776 u_int64_t baudrate;
1777
1778 if (psp->ifname[0] != '\0') {
1779 /* Can we completely trust user-land? */
1780 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
1781 ps.ifname[IFNAMSIZ - 1] = '\0';
1782 ifp = ifunit(ps.ifname);
1783 if (ifp != NULL) {
1784 baudrate = ifp->if_output_bw.max_bw;
1785 bcopy(&baudrate, &psp->baudrate,
1786 sizeof(baudrate));
1787 } else {
1788 error = EINVAL;
1789 }
1790 } else {
1791 error = EINVAL;
1792 }
1793 break;
1794 }
1795
1796 case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
1797 case DIOCADDADDR: /* struct pfioc_pooladdr */
1798 case DIOCGETADDRS: /* struct pfioc_pooladdr */
1799 case DIOCGETADDR: /* struct pfioc_pooladdr */
1800 case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
1801 struct pfioc_pooladdr *pp = NULL;
1802
1803 PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break; )
1804 error = pfioctl_ioc_pooladdr(cmd, pp, p);
1805 PFIOC_STRUCT_END(pp, addr);
1806 break;
1807 }
1808
1809 case DIOCGETRULESETS: /* struct pfioc_ruleset */
1810 case DIOCGETRULESET: { /* struct pfioc_ruleset */
1811 struct pfioc_ruleset *pr = NULL;
1812
1813 PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
1814 error = pfioctl_ioc_ruleset(cmd, pr, p);
1815 PFIOC_STRUCT_END(pr, addr);
1816 break;
1817 }
1818
1819 case DIOCRCLRTABLES: /* struct pfioc_table */
1820 case DIOCRADDTABLES: /* struct pfioc_table */
1821 case DIOCRDELTABLES: /* struct pfioc_table */
1822 case DIOCRGETTABLES: /* struct pfioc_table */
1823 case DIOCRGETTSTATS: /* struct pfioc_table */
1824 case DIOCRCLRTSTATS: /* struct pfioc_table */
1825 case DIOCRSETTFLAGS: /* struct pfioc_table */
1826 case DIOCRCLRADDRS: /* struct pfioc_table */
1827 case DIOCRADDADDRS: /* struct pfioc_table */
1828 case DIOCRDELADDRS: /* struct pfioc_table */
1829 case DIOCRSETADDRS: /* struct pfioc_table */
1830 case DIOCRGETADDRS: /* struct pfioc_table */
1831 case DIOCRGETASTATS: /* struct pfioc_table */
1832 case DIOCRCLRASTATS: /* struct pfioc_table */
1833 case DIOCRTSTADDRS: /* struct pfioc_table */
1834 case DIOCRINADEFINE: { /* struct pfioc_table */
1835 PFIOCX_STRUCT_DECL(pfioc_table);
1836
1837 PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; );
1838 error = pfioctl_ioc_table(cmd,
1839 PFIOCX_STRUCT_ADDR32(pfioc_table),
1840 PFIOCX_STRUCT_ADDR64(pfioc_table), p);
1841 PFIOCX_STRUCT_END(pfioc_table, addr);
1842 break;
1843 }
1844
1845 case DIOCOSFPADD: /* struct pf_osfp_ioctl */
1846 case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
1847 struct pf_osfp_ioctl *io = NULL;
1848
1849 PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break; );
1850 if (cmd == DIOCOSFPADD) {
1851 error = pf_osfp_add(io);
1852 } else {
1853 VERIFY(cmd == DIOCOSFPGET);
1854 error = pf_osfp_get(io);
1855 }
1856 PFIOC_STRUCT_END(io, addr);
1857 break;
1858 }
1859
1860 case DIOCXBEGIN: /* struct pfioc_trans */
1861 case DIOCXROLLBACK: /* struct pfioc_trans */
1862 case DIOCXCOMMIT: { /* struct pfioc_trans */
1863 PFIOCX_STRUCT_DECL(pfioc_trans);
1864
1865 PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break; );
1866 error = pfioctl_ioc_trans(cmd,
1867 PFIOCX_STRUCT_ADDR32(pfioc_trans),
1868 PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
1869 PFIOCX_STRUCT_END(pfioc_trans, addr);
1870 break;
1871 }
1872
1873 case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
1874 PFIOCX_STRUCT_DECL(pfioc_src_nodes);
1875
1876 PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
1877 error = ENOMEM; break; );
1878 error = pfioctl_ioc_src_nodes(cmd,
1879 PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
1880 PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
1881 PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
1882 break;
1883 }
1884
1885 case DIOCCLRSRCNODES: {
1886 struct pf_src_node *n;
1887 struct pf_state *state;
1888
1889 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1890 state->src_node = NULL;
1891 state->nat_src_node = NULL;
1892 }
1893 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
1894 n->expire = 1;
1895 n->states = 0;
1896 }
1897 pf_purge_expired_src_nodes();
1898 pf_status.src_nodes = 0;
1899 break;
1900 }
1901
1902 case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
1903 struct pfioc_src_node_kill *psnk = NULL;
1904
1905 PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break; );
1906 error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
1907 PFIOC_STRUCT_END(psnk, addr);
1908 break;
1909 }
1910
1911 case DIOCSETHOSTID: { /* u_int32_t */
1912 u_int32_t hid;
1913
1914 /* small enough to be on stack */
1915 bcopy(addr, &hid, sizeof(hid));
1916 if (hid == 0) {
1917 pf_status.hostid = random();
1918 } else {
1919 pf_status.hostid = hid;
1920 }
1921 break;
1922 }
1923
1924 case DIOCOSFPFLUSH:
1925 pf_osfp_flush();
1926 break;
1927
1928 case DIOCIGETIFACES: /* struct pfioc_iface */
1929 case DIOCSETIFFLAG: /* struct pfioc_iface */
1930 case DIOCCLRIFFLAG: { /* struct pfioc_iface */
1931 PFIOCX_STRUCT_DECL(pfioc_iface);
1932
1933 PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break; );
1934 error = pfioctl_ioc_iface(cmd,
1935 PFIOCX_STRUCT_ADDR32(pfioc_iface),
1936 PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
1937 PFIOCX_STRUCT_END(pfioc_iface, addr);
1938 break;
1939 }
1940
1941 default:
1942 error = ENODEV;
1943 break;
1944 }
1945
1946 lck_mtx_unlock(pf_lock);
1947 lck_rw_done(pf_perim_lock);
1948
1949 return error;
1950 }
1951
1952 static int
1953 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1954 struct pfioc_table_64 *io64, struct proc *p)
1955 {
1956 int p64 = proc_is64bit(p);
1957 int error = 0;
1958
1959 if (!p64) {
1960 goto struct32;
1961 }
1962
1963 #ifdef __LP64__
1964 /*
1965 * 64-bit structure processing
1966 */
1967 switch (cmd) {
1968 case DIOCRCLRTABLES:
1969 if (io64->pfrio_esize != 0) {
1970 error = ENODEV;
1971 break;
1972 }
1973 pfr_table_copyin_cleanup(&io64->pfrio_table);
1974 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1975 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1976 break;
1977
1978 case DIOCRADDTABLES:
1979 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1980 error = ENODEV;
1981 break;
1982 }
1983 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1984 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1985 break;
1986
1987 case DIOCRDELTABLES:
1988 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1989 error = ENODEV;
1990 break;
1991 }
1992 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1993 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1994 break;
1995
1996 case DIOCRGETTABLES:
1997 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1998 error = ENODEV;
1999 break;
2000 }
2001 pfr_table_copyin_cleanup(&io64->pfrio_table);
2002 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
2003 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2004 break;
2005
2006 case DIOCRGETTSTATS:
2007 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
2008 error = ENODEV;
2009 break;
2010 }
2011 pfr_table_copyin_cleanup(&io64->pfrio_table);
2012 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
2013 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2014 break;
2015
2016 case DIOCRCLRTSTATS:
2017 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2018 error = ENODEV;
2019 break;
2020 }
2021 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
2022 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2023 break;
2024
2025 case DIOCRSETTFLAGS:
2026 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2027 error = ENODEV;
2028 break;
2029 }
2030 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
2031 io64->pfrio_setflag, io64->pfrio_clrflag,
2032 &io64->pfrio_nchange, &io64->pfrio_ndel,
2033 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2034 break;
2035
2036 case DIOCRCLRADDRS:
2037 if (io64->pfrio_esize != 0) {
2038 error = ENODEV;
2039 break;
2040 }
2041 pfr_table_copyin_cleanup(&io64->pfrio_table);
2042 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2043 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2044 break;
2045
2046 case DIOCRADDADDRS:
2047 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2048 error = ENODEV;
2049 break;
2050 }
2051 pfr_table_copyin_cleanup(&io64->pfrio_table);
2052 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2053 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2054 PFR_FLAG_USERIOCTL);
2055 break;
2056
2057 case DIOCRDELADDRS:
2058 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2059 error = ENODEV;
2060 break;
2061 }
2062 pfr_table_copyin_cleanup(&io64->pfrio_table);
2063 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2064 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2065 PFR_FLAG_USERIOCTL);
2066 break;
2067
2068 case DIOCRSETADDRS:
2069 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2070 error = ENODEV;
2071 break;
2072 }
2073 pfr_table_copyin_cleanup(&io64->pfrio_table);
2074 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2075 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2076 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2077 PFR_FLAG_USERIOCTL, 0);
2078 break;
2079
2080 case DIOCRGETADDRS:
2081 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2082 error = ENODEV;
2083 break;
2084 }
2085 pfr_table_copyin_cleanup(&io64->pfrio_table);
2086 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2087 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2088 break;
2089
2090 case DIOCRGETASTATS:
2091 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2092 error = ENODEV;
2093 break;
2094 }
2095 pfr_table_copyin_cleanup(&io64->pfrio_table);
2096 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2097 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2098 break;
2099
2100 case DIOCRCLRASTATS:
2101 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2102 error = ENODEV;
2103 break;
2104 }
2105 pfr_table_copyin_cleanup(&io64->pfrio_table);
2106 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2107 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2108 PFR_FLAG_USERIOCTL);
2109 break;
2110
2111 case DIOCRTSTADDRS:
2112 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2113 error = ENODEV;
2114 break;
2115 }
2116 pfr_table_copyin_cleanup(&io64->pfrio_table);
2117 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2118 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2119 PFR_FLAG_USERIOCTL);
2120 break;
2121
2122 case DIOCRINADEFINE:
2123 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2124 error = ENODEV;
2125 break;
2126 }
2127 pfr_table_copyin_cleanup(&io64->pfrio_table);
2128 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2129 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2130 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2131 break;
2132
2133 default:
2134 VERIFY(0);
2135 /* NOTREACHED */
2136 }
2137 goto done;
2138 #else
2139 #pragma unused(io64)
2140 #endif /* __LP64__ */
2141
2142 struct32:
2143 /*
2144 * 32-bit structure processing
2145 */
2146 switch (cmd) {
2147 case DIOCRCLRTABLES:
2148 if (io32->pfrio_esize != 0) {
2149 error = ENODEV;
2150 break;
2151 }
2152 pfr_table_copyin_cleanup(&io32->pfrio_table);
2153 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2154 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2155 break;
2156
2157 case DIOCRADDTABLES:
2158 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2159 error = ENODEV;
2160 break;
2161 }
2162 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2163 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2164 break;
2165
2166 case DIOCRDELTABLES:
2167 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2168 error = ENODEV;
2169 break;
2170 }
2171 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2172 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2173 break;
2174
2175 case DIOCRGETTABLES:
2176 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2177 error = ENODEV;
2178 break;
2179 }
2180 pfr_table_copyin_cleanup(&io32->pfrio_table);
2181 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2182 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2183 break;
2184
2185 case DIOCRGETTSTATS:
2186 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2187 error = ENODEV;
2188 break;
2189 }
2190 pfr_table_copyin_cleanup(&io32->pfrio_table);
2191 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2192 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2193 break;
2194
2195 case DIOCRCLRTSTATS:
2196 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2197 error = ENODEV;
2198 break;
2199 }
2200 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2201 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2202 break;
2203
2204 case DIOCRSETTFLAGS:
2205 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2206 error = ENODEV;
2207 break;
2208 }
2209 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2210 io32->pfrio_setflag, io32->pfrio_clrflag,
2211 &io32->pfrio_nchange, &io32->pfrio_ndel,
2212 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2213 break;
2214
2215 case DIOCRCLRADDRS:
2216 if (io32->pfrio_esize != 0) {
2217 error = ENODEV;
2218 break;
2219 }
2220 pfr_table_copyin_cleanup(&io32->pfrio_table);
2221 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2222 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2223 break;
2224
2225 case DIOCRADDADDRS:
2226 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2227 error = ENODEV;
2228 break;
2229 }
2230 pfr_table_copyin_cleanup(&io32->pfrio_table);
2231 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2232 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2233 PFR_FLAG_USERIOCTL);
2234 break;
2235
2236 case DIOCRDELADDRS:
2237 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2238 error = ENODEV;
2239 break;
2240 }
2241 pfr_table_copyin_cleanup(&io32->pfrio_table);
2242 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2243 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2244 PFR_FLAG_USERIOCTL);
2245 break;
2246
2247 case DIOCRSETADDRS:
2248 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2249 error = ENODEV;
2250 break;
2251 }
2252 pfr_table_copyin_cleanup(&io32->pfrio_table);
2253 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2254 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2255 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2256 PFR_FLAG_USERIOCTL, 0);
2257 break;
2258
2259 case DIOCRGETADDRS:
2260 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2261 error = ENODEV;
2262 break;
2263 }
2264 pfr_table_copyin_cleanup(&io32->pfrio_table);
2265 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2266 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2267 break;
2268
2269 case DIOCRGETASTATS:
2270 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2271 error = ENODEV;
2272 break;
2273 }
2274 pfr_table_copyin_cleanup(&io32->pfrio_table);
2275 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2276 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2277 break;
2278
2279 case DIOCRCLRASTATS:
2280 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2281 error = ENODEV;
2282 break;
2283 }
2284 pfr_table_copyin_cleanup(&io32->pfrio_table);
2285 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2286 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2287 PFR_FLAG_USERIOCTL);
2288 break;
2289
2290 case DIOCRTSTADDRS:
2291 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2292 error = ENODEV;
2293 break;
2294 }
2295 pfr_table_copyin_cleanup(&io32->pfrio_table);
2296 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2297 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2298 PFR_FLAG_USERIOCTL);
2299 break;
2300
2301 case DIOCRINADEFINE:
2302 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2303 error = ENODEV;
2304 break;
2305 }
2306 pfr_table_copyin_cleanup(&io32->pfrio_table);
2307 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2308 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2309 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2310 break;
2311
2312 default:
2313 VERIFY(0);
2314 /* NOTREACHED */
2315 }
2316 #ifdef __LP64__
2317 done:
2318 #endif
2319 return error;
2320 }
2321
2322 static int
2323 pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
2324 struct pfioc_tokens_64 *tok64, struct proc *p)
2325 {
2326 struct pfioc_token *tokens;
2327 struct pfioc_kernel_token *entry, *tmp;
2328 user_addr_t token_buf;
2329 int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
2330 char *ptr;
2331
2332 switch (cmd) {
2333 case DIOCGETSTARTERS: {
2334 int size;
2335
2336 if (nr_tokens == 0) {
2337 error = ENOENT;
2338 break;
2339 }
2340
2341 size = sizeof(struct pfioc_token) * nr_tokens;
2342 if (size / nr_tokens != sizeof(struct pfioc_token)) {
2343 os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
2344 error = ERANGE;
2345 break;
2346 }
2347 ocnt = cnt = (p64 ? tok64->size : tok32->size);
2348 if (cnt == 0) {
2349 if (p64) {
2350 tok64->size = size;
2351 } else {
2352 tok32->size = size;
2353 }
2354 break;
2355 }
2356
2357 #ifdef __LP64__
2358 token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
2359 #else
2360 token_buf = tok32->pgt_buf;
2361 #endif
2362 tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
2363 if (tokens == NULL) {
2364 error = ENOMEM;
2365 break;
2366 }
2367
2368 ptr = (void *)tokens;
2369 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
2370 struct pfioc_token *t;
2371
2372 if ((unsigned)cnt < sizeof(*tokens)) {
2373 break; /* no more buffer space left */
2374 }
2375 t = (struct pfioc_token *)(void *)ptr;
2376 t->token_value = entry->token.token_value;
2377 t->timestamp = entry->token.timestamp;
2378 t->pid = entry->token.pid;
2379 bcopy(entry->token.proc_name, t->proc_name,
2380 PFTOK_PROCNAME_LEN);
2381 ptr += sizeof(struct pfioc_token);
2382
2383 cnt -= sizeof(struct pfioc_token);
2384 }
2385
2386 if (cnt < ocnt) {
2387 error = copyout(tokens, token_buf, ocnt - cnt);
2388 }
2389
2390 if (p64) {
2391 tok64->size = ocnt - cnt;
2392 } else {
2393 tok32->size = ocnt - cnt;
2394 }
2395
2396 _FREE(tokens, M_TEMP);
2397 break;
2398 }
2399
2400 default:
2401 VERIFY(0);
2402 /* NOTREACHED */
2403 }
2404
2405 return error;
2406 }
2407
2408 static void
2409 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2410 {
2411 struct pf_state *state;
2412 struct pf_src_node *sn;
2413 int killed = 0;
2414
2415 /* expire the states */
2416 state = TAILQ_FIRST(&state_list);
2417 while (state) {
2418 if (state->rule.ptr == rule) {
2419 state->timeout = PFTM_PURGE;
2420 }
2421 state = TAILQ_NEXT(state, entry_list);
2422 }
2423 pf_purge_expired_states(pf_status.states);
2424
2425 /* expire the src_nodes */
2426 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2427 if (sn->rule.ptr != rule) {
2428 continue;
2429 }
2430 if (sn->states != 0) {
2431 RB_FOREACH(state, pf_state_tree_id,
2432 &tree_id) {
2433 if (state->src_node == sn) {
2434 state->src_node = NULL;
2435 }
2436 if (state->nat_src_node == sn) {
2437 state->nat_src_node = NULL;
2438 }
2439 }
2440 sn->states = 0;
2441 }
2442 sn->expire = 1;
2443 killed++;
2444 }
2445 if (killed) {
2446 pf_purge_expired_src_nodes();
2447 }
2448 }
2449
2450 static void
2451 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2452 struct pf_rule *rule)
2453 {
2454 struct pf_rule *r;
2455 int nr = 0;
2456
2457 pf_expire_states_and_src_nodes(rule);
2458
2459 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2460 if (ruleset->rules[rs_num].active.rcount-- == 0) {
2461 panic("%s: rcount value broken!", __func__);
2462 }
2463 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2464
2465 while (r) {
2466 r->nr = nr++;
2467 r = TAILQ_NEXT(r, entries);
2468 }
2469 }
2470
2471
2472 static void
2473 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2474 {
2475 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2476 ruleset->rules[rs].active.ticket =
2477 ++ruleset->rules[rs].inactive.ticket;
2478 }
2479
2480 /*
2481 * req_dev encodes the PF interface. Currently, possible values are
2482 * 0 or PFRULE_PFM
2483 */
2484 static int
2485 pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
2486 {
2487 struct pf_ruleset *ruleset;
2488 struct pf_rule *rule = NULL;
2489 int is_anchor;
2490 int error;
2491 int i;
2492
2493 is_anchor = (pr->anchor_call[0] != '\0');
2494 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
2495 pr->rule.owner, is_anchor, &error)) == NULL) {
2496 return error;
2497 }
2498
2499 for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
2500 rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2501 while (rule && (rule->ticket != pr->rule.ticket)) {
2502 rule = TAILQ_NEXT(rule, entries);
2503 }
2504 }
2505 if (rule == NULL) {
2506 return ENOENT;
2507 } else {
2508 i--;
2509 }
2510
2511 if (strcmp(rule->owner, pr->rule.owner)) {
2512 return EACCES;
2513 }
2514
2515 delete_rule:
2516 if (rule->anchor && (ruleset != &pf_main_ruleset) &&
2517 ((strcmp(ruleset->anchor->owner, "")) == 0) &&
2518 ((ruleset->rules[i].active.rcount - 1) == 0)) {
2519 /* set rule & ruleset to parent and repeat */
2520 struct pf_rule *delete_rule = rule;
2521 struct pf_ruleset *delete_ruleset = ruleset;
2522
2523 #define parent_ruleset ruleset->anchor->parent->ruleset
2524 if (ruleset->anchor->parent == NULL) {
2525 ruleset = &pf_main_ruleset;
2526 } else {
2527 ruleset = &parent_ruleset;
2528 }
2529
2530 rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2531 while (rule &&
2532 (rule->anchor != delete_ruleset->anchor)) {
2533 rule = TAILQ_NEXT(rule, entries);
2534 }
2535 if (rule == NULL) {
2536 panic("%s: rule not found!", __func__);
2537 }
2538
2539 /*
2540 * if reqest device != rule's device, bail :
2541 * with error if ticket matches;
2542 * without error if ticket doesn't match (i.e. its just cleanup)
2543 */
2544 if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2545 if (rule->ticket != pr->rule.ticket) {
2546 return 0;
2547 } else {
2548 return EACCES;
2549 }
2550 }
2551
2552 if (delete_rule->rule_flag & PFRULE_PFM) {
2553 pffwrules--;
2554 }
2555
2556 pf_delete_rule_from_ruleset(delete_ruleset,
2557 i, delete_rule);
2558 delete_ruleset->rules[i].active.ticket =
2559 ++delete_ruleset->rules[i].inactive.ticket;
2560 goto delete_rule;
2561 } else {
2562 /*
2563 * process deleting rule only if device that added the
2564 * rule matches device that issued the request
2565 */
2566 if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2567 return EACCES;
2568 }
2569 if (rule->rule_flag & PFRULE_PFM) {
2570 pffwrules--;
2571 }
2572 pf_delete_rule_from_ruleset(ruleset, i,
2573 rule);
2574 pf_ruleset_cleanup(ruleset, i);
2575 }
2576
2577 return 0;
2578 }
2579
2580 /*
2581 * req_dev encodes the PF interface. Currently, possible values are
2582 * 0 or PFRULE_PFM
2583 */
2584 static void
2585 pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
2586 {
2587 struct pf_ruleset *ruleset;
2588 struct pf_rule *rule, *next;
2589 int deleted = 0;
2590
2591 for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
2592 rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
2593 ruleset = &pf_main_ruleset;
2594 while (rule) {
2595 next = TAILQ_NEXT(rule, entries);
2596 /*
2597 * process deleting rule only if device that added the
2598 * rule matches device that issued the request
2599 */
2600 if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2601 rule = next;
2602 continue;
2603 }
2604 if (rule->anchor) {
2605 if (((strcmp(rule->owner, owner)) == 0) ||
2606 ((strcmp(rule->owner, "")) == 0)) {
2607 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2608 if (deleted) {
2609 pf_ruleset_cleanup(ruleset, rs);
2610 deleted = 0;
2611 }
2612 /* step into anchor */
2613 ruleset =
2614 &rule->anchor->ruleset;
2615 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2616 continue;
2617 } else {
2618 if (rule->rule_flag &
2619 PFRULE_PFM) {
2620 pffwrules--;
2621 }
2622 pf_delete_rule_from_ruleset(ruleset, rs, rule);
2623 deleted = 1;
2624 rule = next;
2625 }
2626 } else {
2627 rule = next;
2628 }
2629 } else {
2630 if (((strcmp(rule->owner, owner)) == 0)) {
2631 /* delete rule */
2632 if (rule->rule_flag & PFRULE_PFM) {
2633 pffwrules--;
2634 }
2635 pf_delete_rule_from_ruleset(ruleset,
2636 rs, rule);
2637 deleted = 1;
2638 }
2639 rule = next;
2640 }
2641 if (rule == NULL) {
2642 if (deleted) {
2643 pf_ruleset_cleanup(ruleset, rs);
2644 deleted = 0;
2645 }
2646 if (ruleset != &pf_main_ruleset) {
2647 pf_deleterule_anchor_step_out(&ruleset,
2648 rs, &rule);
2649 }
2650 }
2651 }
2652 }
2653 }
2654
2655 static void
2656 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2657 int rs, struct pf_rule **rule_ptr)
2658 {
2659 struct pf_ruleset *ruleset = *ruleset_ptr;
2660 struct pf_rule *rule = *rule_ptr;
2661
2662 /* step out of anchor */
2663 struct pf_ruleset *rs_copy = ruleset;
2664 ruleset = ruleset->anchor->parent?
2665 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2666
2667 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2668 while (rule && (rule->anchor != rs_copy->anchor)) {
2669 rule = TAILQ_NEXT(rule, entries);
2670 }
2671 if (rule == NULL) {
2672 panic("%s: parent rule of anchor not found!", __func__);
2673 }
2674 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2675 rule = TAILQ_NEXT(rule, entries);
2676 }
2677
2678 *ruleset_ptr = ruleset;
2679 *rule_ptr = rule;
2680 }
2681
2682 static void
2683 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2684 {
2685 VERIFY(aw);
2686 bzero(&aw->p, sizeof aw->p);
2687 }
2688
2689 static int
2690 pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
2691 struct pf_ruleset *ruleset)
2692 {
2693 struct pf_pooladdr *apa;
2694 int error = 0;
2695
2696 if (rule->ifname[0]) {
2697 rule->kif = pfi_kif_get(rule->ifname);
2698 if (rule->kif == NULL) {
2699 pool_put(&pf_rule_pl, rule);
2700 return EINVAL;
2701 }
2702 pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
2703 }
2704 if (rule->tagname[0]) {
2705 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
2706 error = EBUSY;
2707 }
2708 }
2709 if (rule->match_tagname[0]) {
2710 if ((rule->match_tag =
2711 pf_tagname2tag(rule->match_tagname)) == 0) {
2712 error = EBUSY;
2713 }
2714 }
2715 if (rule->rt && !rule->direction) {
2716 error = EINVAL;
2717 }
2718 #if PFLOG
2719 if (!rule->log) {
2720 rule->logif = 0;
2721 }
2722 if (rule->logif >= PFLOGIFS_MAX) {
2723 error = EINVAL;
2724 }
2725 #endif /* PFLOG */
2726 pf_addrwrap_setup(&rule->src.addr);
2727 pf_addrwrap_setup(&rule->dst.addr);
2728 if (pf_rtlabel_add(&rule->src.addr) ||
2729 pf_rtlabel_add(&rule->dst.addr)) {
2730 error = EBUSY;
2731 }
2732 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
2733 error = EINVAL;
2734 }
2735 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
2736 error = EINVAL;
2737 }
2738 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
2739 error = EINVAL;
2740 }
2741 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
2742 error = EINVAL;
2743 }
2744 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
2745 error = EINVAL;
2746 }
2747 TAILQ_FOREACH(apa, &pf_pabuf, entries)
2748 if (pf_tbladdr_setup(ruleset, &apa->addr)) {
2749 error = EINVAL;
2750 }
2751
2752 if (rule->overload_tblname[0]) {
2753 if ((rule->overload_tbl = pfr_attach_table(ruleset,
2754 rule->overload_tblname)) == NULL) {
2755 error = EINVAL;
2756 } else {
2757 rule->overload_tbl->pfrkt_flags |=
2758 PFR_TFLAG_ACTIVE;
2759 }
2760 }
2761
2762 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
2763
2764 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2765 (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
2766 rule->anchor == NULL) ||
2767 (rule->rt > PF_FASTROUTE)) &&
2768 (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
2769 error = EINVAL;
2770 }
2771
2772 if (error) {
2773 pf_rm_rule(NULL, rule);
2774 return error;
2775 }
2776 /* For a NAT64 rule the rule's address family is AF_INET6 whereas
2777 * the address pool's family will be AF_INET
2778 */
2779 rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
2780 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2781 rule->evaluations = rule->packets[0] = rule->packets[1] =
2782 rule->bytes[0] = rule->bytes[1] = 0;
2783
2784 return 0;
2785 }
2786
2787 static int
2788 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2789 {
2790 int error = 0;
2791 u_int32_t req_dev = 0;
2792
2793 switch (cmd) {
2794 case DIOCADDRULE: {
2795 struct pf_ruleset *ruleset;
2796 struct pf_rule *rule, *tail;
2797 int rs_num;
2798
2799 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2800 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2801 ruleset = pf_find_ruleset(pr->anchor);
2802 if (ruleset == NULL) {
2803 error = EINVAL;
2804 break;
2805 }
2806 rs_num = pf_get_ruleset_number(pr->rule.action);
2807 if (rs_num >= PF_RULESET_MAX) {
2808 error = EINVAL;
2809 break;
2810 }
2811 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2812 error = EINVAL;
2813 break;
2814 }
2815 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2816 error = EBUSY;
2817 break;
2818 }
2819 if (pr->pool_ticket != ticket_pabuf) {
2820 error = EBUSY;
2821 break;
2822 }
2823 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2824 if (rule == NULL) {
2825 error = ENOMEM;
2826 break;
2827 }
2828 pf_rule_copyin(&pr->rule, rule, p, minordev);
2829 #if !INET
2830 if (rule->af == AF_INET) {
2831 pool_put(&pf_rule_pl, rule);
2832 error = EAFNOSUPPORT;
2833 break;
2834 }
2835 #endif /* INET */
2836 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2837 pf_rulequeue);
2838 if (tail) {
2839 rule->nr = tail->nr + 1;
2840 } else {
2841 rule->nr = 0;
2842 }
2843
2844 if ((error = pf_rule_setup(pr, rule, ruleset))) {
2845 break;
2846 }
2847
2848 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2849 rule, entries);
2850 ruleset->rules[rs_num].inactive.rcount++;
2851 if (rule->rule_flag & PFRULE_PFM) {
2852 pffwrules++;
2853 }
2854
2855 if (rule->action == PF_NAT64) {
2856 atomic_add_16(&pf_nat64_configured, 1);
2857 }
2858
2859 if (pr->anchor_call[0] == '\0') {
2860 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2861 if (rule->rule_flag & PFRULE_PFM) {
2862 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2863 }
2864 }
2865
2866 #if DUMMYNET
2867 if (rule->action == PF_DUMMYNET) {
2868 struct dummynet_event dn_event;
2869 uint32_t direction = DN_INOUT;;
2870 bzero(&dn_event, sizeof(dn_event));
2871
2872 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2873
2874 if (rule->direction == PF_IN) {
2875 direction = DN_IN;
2876 } else if (rule->direction == PF_OUT) {
2877 direction = DN_OUT;
2878 }
2879
2880 dn_event.dn_event_rule_config.dir = direction;
2881 dn_event.dn_event_rule_config.af = rule->af;
2882 dn_event.dn_event_rule_config.proto = rule->proto;
2883 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2884 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2885 strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2886 sizeof(dn_event.dn_event_rule_config.ifname));
2887
2888 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2889 }
2890 #endif
2891 break;
2892 }
2893
2894 case DIOCGETRULES: {
2895 struct pf_ruleset *ruleset;
2896 struct pf_rule *tail;
2897 int rs_num;
2898
2899 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2900 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2901 ruleset = pf_find_ruleset(pr->anchor);
2902 if (ruleset == NULL) {
2903 error = EINVAL;
2904 break;
2905 }
2906 rs_num = pf_get_ruleset_number(pr->rule.action);
2907 if (rs_num >= PF_RULESET_MAX) {
2908 error = EINVAL;
2909 break;
2910 }
2911 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2912 pf_rulequeue);
2913 if (tail) {
2914 pr->nr = tail->nr + 1;
2915 } else {
2916 pr->nr = 0;
2917 }
2918 pr->ticket = ruleset->rules[rs_num].active.ticket;
2919 break;
2920 }
2921
2922 case DIOCGETRULE: {
2923 struct pf_ruleset *ruleset;
2924 struct pf_rule *rule;
2925 int rs_num, i;
2926
2927 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2928 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2929 ruleset = pf_find_ruleset(pr->anchor);
2930 if (ruleset == NULL) {
2931 error = EINVAL;
2932 break;
2933 }
2934 rs_num = pf_get_ruleset_number(pr->rule.action);
2935 if (rs_num >= PF_RULESET_MAX) {
2936 error = EINVAL;
2937 break;
2938 }
2939 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2940 error = EBUSY;
2941 break;
2942 }
2943 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2944 while ((rule != NULL) && (rule->nr != pr->nr)) {
2945 rule = TAILQ_NEXT(rule, entries);
2946 }
2947 if (rule == NULL) {
2948 error = EBUSY;
2949 break;
2950 }
2951 pf_rule_copyout(rule, &pr->rule);
2952 if (pf_anchor_copyout(ruleset, rule, pr)) {
2953 error = EBUSY;
2954 break;
2955 }
2956 pfi_dynaddr_copyout(&pr->rule.src.addr);
2957 pfi_dynaddr_copyout(&pr->rule.dst.addr);
2958 pf_tbladdr_copyout(&pr->rule.src.addr);
2959 pf_tbladdr_copyout(&pr->rule.dst.addr);
2960 pf_rtlabel_copyout(&pr->rule.src.addr);
2961 pf_rtlabel_copyout(&pr->rule.dst.addr);
2962 for (i = 0; i < PF_SKIP_COUNT; ++i) {
2963 if (rule->skip[i].ptr == NULL) {
2964 pr->rule.skip[i].nr = -1;
2965 } else {
2966 pr->rule.skip[i].nr =
2967 rule->skip[i].ptr->nr;
2968 }
2969 }
2970
2971 if (pr->action == PF_GET_CLR_CNTR) {
2972 rule->evaluations = 0;
2973 rule->packets[0] = rule->packets[1] = 0;
2974 rule->bytes[0] = rule->bytes[1] = 0;
2975 }
2976 break;
2977 }
2978
2979 case DIOCCHANGERULE: {
2980 struct pfioc_rule *pcr = pr;
2981 struct pf_ruleset *ruleset;
2982 struct pf_rule *oldrule = NULL, *newrule = NULL;
2983 struct pf_pooladdr *pa;
2984 u_int32_t nr = 0;
2985 int rs_num;
2986
2987 if (!(pcr->action == PF_CHANGE_REMOVE ||
2988 pcr->action == PF_CHANGE_GET_TICKET) &&
2989 pcr->pool_ticket != ticket_pabuf) {
2990 error = EBUSY;
2991 break;
2992 }
2993
2994 if (pcr->action < PF_CHANGE_ADD_HEAD ||
2995 pcr->action > PF_CHANGE_GET_TICKET) {
2996 error = EINVAL;
2997 break;
2998 }
2999 pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3000 pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
3001 ruleset = pf_find_ruleset(pcr->anchor);
3002 if (ruleset == NULL) {
3003 error = EINVAL;
3004 break;
3005 }
3006 rs_num = pf_get_ruleset_number(pcr->rule.action);
3007 if (rs_num >= PF_RULESET_MAX) {
3008 error = EINVAL;
3009 break;
3010 }
3011
3012 if (pcr->action == PF_CHANGE_GET_TICKET) {
3013 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3014 break;
3015 } else {
3016 if (pcr->ticket !=
3017 ruleset->rules[rs_num].active.ticket) {
3018 error = EINVAL;
3019 break;
3020 }
3021 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3022 error = EINVAL;
3023 break;
3024 }
3025 }
3026
3027 if (pcr->action != PF_CHANGE_REMOVE) {
3028 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3029 if (newrule == NULL) {
3030 error = ENOMEM;
3031 break;
3032 }
3033 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3034 #if !INET
3035 if (newrule->af == AF_INET) {
3036 pool_put(&pf_rule_pl, newrule);
3037 error = EAFNOSUPPORT;
3038 break;
3039 }
3040 #endif /* INET */
3041 if (newrule->ifname[0]) {
3042 newrule->kif = pfi_kif_get(newrule->ifname);
3043 if (newrule->kif == NULL) {
3044 pool_put(&pf_rule_pl, newrule);
3045 error = EINVAL;
3046 break;
3047 }
3048 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3049 } else {
3050 newrule->kif = NULL;
3051 }
3052
3053 if (newrule->tagname[0]) {
3054 if ((newrule->tag =
3055 pf_tagname2tag(newrule->tagname)) == 0) {
3056 error = EBUSY;
3057 }
3058 }
3059 if (newrule->match_tagname[0]) {
3060 if ((newrule->match_tag = pf_tagname2tag(
3061 newrule->match_tagname)) == 0) {
3062 error = EBUSY;
3063 }
3064 }
3065 if (newrule->rt && !newrule->direction) {
3066 error = EINVAL;
3067 }
3068 #if PFLOG
3069 if (!newrule->log) {
3070 newrule->logif = 0;
3071 }
3072 if (newrule->logif >= PFLOGIFS_MAX) {
3073 error = EINVAL;
3074 }
3075 #endif /* PFLOG */
3076 pf_addrwrap_setup(&newrule->src.addr);
3077 pf_addrwrap_setup(&newrule->dst.addr);
3078 if (pf_rtlabel_add(&newrule->src.addr) ||
3079 pf_rtlabel_add(&newrule->dst.addr)) {
3080 error = EBUSY;
3081 }
3082 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3083 error = EINVAL;
3084 }
3085 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3086 error = EINVAL;
3087 }
3088 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3089 error = EINVAL;
3090 }
3091 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3092 error = EINVAL;
3093 }
3094 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3095 error = EINVAL;
3096 }
3097 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3098 if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3099 error = EINVAL;
3100 }
3101
3102 if (newrule->overload_tblname[0]) {
3103 if ((newrule->overload_tbl = pfr_attach_table(
3104 ruleset, newrule->overload_tblname)) ==
3105 NULL) {
3106 error = EINVAL;
3107 } else {
3108 newrule->overload_tbl->pfrkt_flags |=
3109 PFR_TFLAG_ACTIVE;
3110 }
3111 }
3112
3113 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3114 if (((((newrule->action == PF_NAT) ||
3115 (newrule->action == PF_RDR) ||
3116 (newrule->action == PF_BINAT) ||
3117 (newrule->rt > PF_FASTROUTE)) &&
3118 !newrule->anchor)) &&
3119 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3120 error = EINVAL;
3121 }
3122
3123 if (error) {
3124 pf_rm_rule(NULL, newrule);
3125 break;
3126 }
3127 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3128 newrule->evaluations = 0;
3129 newrule->packets[0] = newrule->packets[1] = 0;
3130 newrule->bytes[0] = newrule->bytes[1] = 0;
3131 }
3132 pf_empty_pool(&pf_pabuf);
3133
3134 if (pcr->action == PF_CHANGE_ADD_HEAD) {
3135 oldrule = TAILQ_FIRST(
3136 ruleset->rules[rs_num].active.ptr);
3137 } else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3138 oldrule = TAILQ_LAST(
3139 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3140 } else {
3141 oldrule = TAILQ_FIRST(
3142 ruleset->rules[rs_num].active.ptr);
3143 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3144 oldrule = TAILQ_NEXT(oldrule, entries);
3145 }
3146 if (oldrule == NULL) {
3147 if (newrule != NULL) {
3148 pf_rm_rule(NULL, newrule);
3149 }
3150 error = EINVAL;
3151 break;
3152 }
3153 }
3154
3155 if (pcr->action == PF_CHANGE_REMOVE) {
3156 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3157 ruleset->rules[rs_num].active.rcount--;
3158 } else {
3159 if (oldrule == NULL) {
3160 TAILQ_INSERT_TAIL(
3161 ruleset->rules[rs_num].active.ptr,
3162 newrule, entries);
3163 } else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3164 pcr->action == PF_CHANGE_ADD_BEFORE) {
3165 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3166 } else {
3167 TAILQ_INSERT_AFTER(
3168 ruleset->rules[rs_num].active.ptr,
3169 oldrule, newrule, entries);
3170 }
3171 ruleset->rules[rs_num].active.rcount++;
3172 }
3173
3174 nr = 0;
3175 TAILQ_FOREACH(oldrule,
3176 ruleset->rules[rs_num].active.ptr, entries)
3177 oldrule->nr = nr++;
3178
3179 ruleset->rules[rs_num].active.ticket++;
3180
3181 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3182 pf_remove_if_empty_ruleset(ruleset);
3183
3184 break;
3185 }
3186
3187 case DIOCINSERTRULE: {
3188 struct pf_ruleset *ruleset;
3189 struct pf_rule *rule, *tail, *r;
3190 int rs_num;
3191 int is_anchor;
3192
3193 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3194 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3195 is_anchor = (pr->anchor_call[0] != '\0');
3196
3197 if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3198 pr->rule.owner, is_anchor, &error)) == NULL) {
3199 break;
3200 }
3201
3202 rs_num = pf_get_ruleset_number(pr->rule.action);
3203 if (rs_num >= PF_RULESET_MAX) {
3204 error = EINVAL;
3205 break;
3206 }
3207 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3208 error = EINVAL;
3209 break;
3210 }
3211
3212 /* make sure this anchor rule doesn't exist already */
3213 if (is_anchor) {
3214 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3215 while (r) {
3216 if (r->anchor &&
3217 ((strcmp(r->anchor->name,
3218 pr->anchor_call)) == 0)) {
3219 if (((strcmp(pr->rule.owner,
3220 r->owner)) == 0) ||
3221 ((strcmp(r->owner, "")) == 0)) {
3222 error = EEXIST;
3223 } else {
3224 error = EPERM;
3225 }
3226 break;
3227 }
3228 r = TAILQ_NEXT(r, entries);
3229 }
3230 if (error != 0) {
3231 return error;
3232 }
3233 }
3234
3235 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3236 if (rule == NULL) {
3237 error = ENOMEM;
3238 break;
3239 }
3240 pf_rule_copyin(&pr->rule, rule, p, minordev);
3241 #if !INET
3242 if (rule->af == AF_INET) {
3243 pool_put(&pf_rule_pl, rule);
3244 error = EAFNOSUPPORT;
3245 break;
3246 }
3247 #endif /* INET */
3248 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3249 while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3250 r = TAILQ_NEXT(r, entries);
3251 }
3252 if (r == NULL) {
3253 if ((tail =
3254 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3255 pf_rulequeue)) != NULL) {
3256 rule->nr = tail->nr + 1;
3257 } else {
3258 rule->nr = 0;
3259 }
3260 } else {
3261 rule->nr = r->nr;
3262 }
3263
3264 if ((error = pf_rule_setup(pr, rule, ruleset))) {
3265 break;
3266 }
3267
3268 if (rule->anchor != NULL) {
3269 strlcpy(rule->anchor->owner, rule->owner,
3270 PF_OWNER_NAME_SIZE);
3271 }
3272
3273 if (r) {
3274 TAILQ_INSERT_BEFORE(r, rule, entries);
3275 while (r && ++r->nr) {
3276 r = TAILQ_NEXT(r, entries);
3277 }
3278 } else {
3279 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3280 rule, entries);
3281 }
3282 ruleset->rules[rs_num].active.rcount++;
3283
3284 /* Calculate checksum for the main ruleset */
3285 if (ruleset == &pf_main_ruleset) {
3286 error = pf_setup_pfsync_matching(ruleset);
3287 }
3288
3289 pf_ruleset_cleanup(ruleset, rs_num);
3290 rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3291
3292 pr->rule.ticket = rule->ticket;
3293 pf_rule_copyout(rule, &pr->rule);
3294 if (rule->rule_flag & PFRULE_PFM) {
3295 pffwrules++;
3296 }
3297 if (rule->action == PF_NAT64) {
3298 atomic_add_16(&pf_nat64_configured, 1);
3299 }
3300
3301 if (pr->anchor_call[0] == '\0') {
3302 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3303 if (rule->rule_flag & PFRULE_PFM) {
3304 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3305 }
3306 }
3307 break;
3308 }
3309
3310 case DIOCDELETERULE: {
3311 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3312 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3313
3314 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3315 error = EINVAL;
3316 break;
3317 }
3318
3319 /* get device through which request is made */
3320 if ((uint8_t)minordev == PFDEV_PFM) {
3321 req_dev |= PFRULE_PFM;
3322 }
3323
3324 if (pr->rule.ticket) {
3325 if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3326 break;
3327 }
3328 } else {
3329 pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3330 }
3331 pr->nr = pffwrules;
3332 if (pr->rule.action == PF_NAT64) {
3333 atomic_add_16(&pf_nat64_configured, -1);
3334 }
3335 break;
3336 }
3337
3338 default:
3339 VERIFY(0);
3340 /* NOTREACHED */
3341 }
3342
3343 return error;
3344 }
3345
/*
 * Handle DIOCCLRSTATES and DIOCKILLSTATES: tear down pf states,
 * optionally restricted to those whose interface name and/or rule
 * owner match the filter in *psk (DIOCKILLSTATES additionally matches
 * on address family, protocol, addresses and ports).  The number of
 * states removed is reported back in psk->psk_af, which is reused as
 * an output field by this interface.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* NUL-terminate user-supplied names before any strcmp() below */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			/* fetch successor first: pf_unlink_state() may free s */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* a state with no rule pointer cannot match an owner */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the "number of states killed" result */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			/* fetch successor first: pf_unlink_state() may free s */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Orient src/dst so the address/port comparison
			 * below is made against the LAN-side view of the
			 * state regardless of its direction.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* a zero af or proto in the filter acts as a wildcard */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3472
/*
 * Handle DIOCADDSTATE (import a state supplied by userland, e.g. via
 * pfsync) and DIOCGETSTATE (export a single state looked up by its
 * 64-bit id + creator id).
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* validate the timeout index before it is used as such */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			/* unknown interface: release both allocations */
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			/* duplicate state; drop the kif reference taken above */
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		/* counter must not wrap back to zero */
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* build the lookup key from the caller-provided identifiers */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3546
/*
 * Handle DIOCGETSTATES: export the global state list to userland.
 * When the caller passes ps_len == 0, only the buffer size required
 * to hold every state is reported back; otherwise as many states as
 * fit in the supplied buffer are copied out and ps_len is updated to
 * the number of bytes actually written.  Both 32-bit and 64-bit user
 * processes are supported via the paired ps32/ps64 views.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {   /* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size probe: report bytes needed for all states */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* single kernel staging record, reused for each copyout */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already unlinked but not yet purged */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop once the user buffer would overflow */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report the number of bytes actually copied out */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3622
/*
 * Handle DIOCNATLOOK: given one side of a NATed connection, find the
 * matching pf state and return the translated addresses and ports so
 * userland can learn what the other side of the translation is.
 * Returns EINVAL for wildcard queries, E2BIG when more than one state
 * matches, ENOENT when none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Reject wildcard lookups: protocol, both addresses and
		 * (for TCP/UDP) both ports must be supplied.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				/* report the translated endpoint to userland */
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3711
3712 static int
3713 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3714 {
3715 #pragma unused(p)
3716 int error = 0;
3717
3718 switch (cmd) {
3719 case DIOCSETTIMEOUT: {
3720 int old;
3721
3722 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3723 pt->seconds < 0) {
3724 error = EINVAL;
3725 goto fail;
3726 }
3727 old = pf_default_rule.timeout[pt->timeout];
3728 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3729 pt->seconds = 1;
3730 }
3731 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3732 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3733 wakeup(pf_purge_thread_fn);
3734 }
3735 pt->seconds = old;
3736 break;
3737 }
3738
3739 case DIOCGETTIMEOUT: {
3740 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3741 error = EINVAL;
3742 goto fail;
3743 }
3744 pt->seconds = pf_default_rule.timeout[pt->timeout];
3745 break;
3746 }
3747
3748 default:
3749 VERIFY(0);
3750 /* NOTREACHED */
3751 }
3752 fail:
3753 return error;
3754 }
3755
3756 static int
3757 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3758 {
3759 #pragma unused(p)
3760 int error = 0;
3761
3762 switch (cmd) {
3763 case DIOCGETLIMIT: {
3764 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3765 error = EINVAL;
3766 goto fail;
3767 }
3768 pl->limit = pf_pool_limits[pl->index].limit;
3769 break;
3770 }
3771
3772 case DIOCSETLIMIT: {
3773 int old_limit;
3774
3775 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3776 pf_pool_limits[pl->index].pp == NULL) {
3777 error = EINVAL;
3778 goto fail;
3779 }
3780 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3781 pl->limit, NULL, 0);
3782 old_limit = pf_pool_limits[pl->index].limit;
3783 pf_pool_limits[pl->index].limit = pl->limit;
3784 pl->limit = old_limit;
3785 break;
3786 }
3787
3788 default:
3789 VERIFY(0);
3790 /* NOTREACHED */
3791 }
3792 fail:
3793 return error;
3794 }
3795
/*
 * Handle the pool-address ioctls: DIOCBEGINADDRS starts a new staging
 * buffer (pf_pabuf) guarded by a ticket, DIOCADDADDR appends to it,
 * DIOCGETADDRS/DIOCGETADDR enumerate an installed pool, and
 * DIOCCHANGEADDR edits an installed pool in place.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* discard any previous staging buffer and issue a new ticket */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* the caller must hold the current staging ticket */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* unwind the dynaddr state and kif reference */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* count the addresses in the requested pool */
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* walk to the pp->nr'th entry */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* build the replacement/new entry first */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* unwind the partly-initialized entry */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* locate the insertion/removal anchor point */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		/* restart round-robin selection from the head of the pool */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4010
/*
 * Handle DIOCGETRULESETS (count child anchors of a ruleset path) and
 * DIOCGETRULESET (return the name of the pr->nr'th child anchor).
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* main ruleset: count only top-level anchors */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		}
		/* empty name means pr->nr was out of range */
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4084
/*
 * Handle the transaction ioctls used for atomic ruleset/table loads:
 * DIOCXBEGIN opens an inactive copy per transaction element and hands
 * back tickets, DIOCXROLLBACK discards those copies, and DIOCXCOMMIT
 * validates every ticket first and then swaps all inactive copies in
 * (two-phase, so a commit either fully happens or fails up front).
 * The element array lives in user space; each entry is copied in
 * individually.  Supports 32-bit and 64-bit callers via io32/io64.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* element size mismatch implies a userland/kernel skew */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ not supported here; silently skip */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			/* return the issued ticket to the caller's array */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;  /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;  /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;  /* remember start for second pass */
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* verify the table transaction is still open */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				/* verify the inactive ruleset and its ticket */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;  /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;  /* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4310
/*
 * Handle DIOCGETSRCNODES: export the source-tracking tree to userland.
 * When psn_len == 0 only the required buffer size is reported back;
 * otherwise entries are copied out (with times rebased to "seconds
 * from now" and kernel pointers scrubbed) until the buffer is full.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* size probe: count the nodes, report bytes needed */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* single staging record, reused for each copyout */
		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the user buffer would overflow */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			/* export the rule number instead of a kernel pointer */
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* rebase absolute times to relative seconds */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* scrub kernel pointers before crossing to userland */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* report the number of bytes actually copied out */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4409
/*
 * Handle DIOCKILLSRCNODES: expire every source-tracking node whose
 * source and redirect addresses match the caller's filter, after
 * first detaching any states that still reference it.  The count of
 * killed nodes is returned in psnk->psnk_af (reused as output).
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/* detach all states from this node */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn) {
							s->src_node = NULL;
						}
						if (s->nat_src_node == sn) {
							s->nat_src_node = NULL;
						}
					}
					sn->states = 0;
				}
				/* mark as already expired for the purge pass */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0) {
			pf_purge_expired_src_nodes();
		}

		/* psnk_af doubles as the "number killed" result */
		psnk->psnk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4465
/*
 * Handle the interface ioctls: DIOCIGETIFACES lists pf's known
 * interfaces into a user buffer, DIOCSETIFFLAG / DIOCCLRIFFLAG set or
 * clear per-interface pf flags.  Each command exists in a 32-bit and
 * a 64-bit user layout; p selects which view is authoritative.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

#ifdef __LP64__
		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
#else
		buf = io32->pfiio_buffer;
		esize = io32->pfiio_esize;
#endif

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		/* NUL-terminate the user-supplied name filter */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_set_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_clear_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4535
/*
 * Main PF entry point from the IP input/output paths.  Runs the packet
 * at *mp through the per-family PF hook under the PF locks, preserving
 * packet-chain linkage around the call: the rest of the chain is
 * detached first and re-attached afterwards, and *mppn (the previous
 * packet's m_nextpkt slot) is fixed up if PF consumed or replaced the
 * packet.  Returns 0 if the packet passed, non-zero if it was dropped.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * marks == none means this thread already holds PF (recursive
	 * entry); in that case the locks are already ours and must not
	 * be taken or dropped again.
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(pf_perim_lock);
		if (!pf_is_enabled) {
			/* NB: jumps past lck_mtx_lock, so only the
			 * perim lock is dropped at done: */
			goto done;
		}
		lck_mtx_lock(pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* detach the rest of the chain; PF works on a single packet */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* packet was dropped: splice the chain around it */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4626
4627
4628 #if INET
/*
 * IPv4 leg of pf_af_hook: finalize any delayed transport checksum the
 * outgoing interface cannot offload, swap ip_len/ip_off to network
 * byte order for PF on little-endian machines, run pf_test_mbuf, and
 * swap back on pass.  Returns EHOSTUNREACH if PF dropped the packet,
 * ENOBUFS if PF consumed it, 0 if it passed.
 */
static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* PF expects these fields in network byte order */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF rejected the packet: free it here */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF already consumed the mbuf */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* passed: restore host byte order for the caller;
		 * re-fetch the header since PF may have replaced the mbuf */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
4681 #endif /* INET */
4682
4683 int
4684 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4685 struct ip_fw_args *fwa)
4686 {
4687 int error = 0;
4688
4689 /*
4690 * If the packet is outbound, is originated locally, is flagged for
4691 * delayed UDP/TCP checksum calculation, and is about to be processed
4692 * for an interface that doesn't support the appropriate checksum
4693 * offloading, then calculated the checksum here so that PF can adjust
4694 * it properly.
4695 */
4696 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4697 static const int mask = CSUM_DELAY_IPV6_DATA;
4698 const int flags = (*mp)->m_pkthdr.csum_flags &
4699 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4700
4701 if (flags & mask) {
4702 /*
4703 * Checksum offload should not have been enabled
4704 * when extension headers exist, thus 0 for optlen.
4705 */
4706 in6_delayed_cksum(*mp);
4707 (*mp)->m_pkthdr.csum_flags &= ~mask;
4708 }
4709 }
4710
4711 if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4712 if (*mp != NULL) {
4713 m_freem(*mp);
4714 *mp = NULL;
4715 error = EHOSTUNREACH;
4716 } else {
4717 error = ENOBUFS;
4718 }
4719 }
4720 return error;
4721 }
4722
4723 int
4724 pf_ifaddr_hook(struct ifnet *ifp)
4725 {
4726 struct pfi_kif *kif = ifp->if_pf_kif;
4727
4728 if (kif != NULL) {
4729 lck_rw_lock_shared(pf_perim_lock);
4730 lck_mtx_lock(pf_lock);
4731
4732 pfi_kifaddr_update(kif);
4733
4734 lck_mtx_unlock(pf_lock);
4735 lck_rw_done(pf_perim_lock);
4736 }
4737 return 0;
4738 }
4739
4740 /*
4741 * Caller acquires dlil lock as writer (exclusive)
4742 */
4743 void
4744 pf_ifnet_hook(struct ifnet *ifp, int attach)
4745 {
4746 lck_rw_lock_shared(pf_perim_lock);
4747 lck_mtx_lock(pf_lock);
4748 if (attach) {
4749 pfi_attach_ifnet(ifp);
4750 } else {
4751 pfi_detach_ifnet(ifp);
4752 }
4753 lck_mtx_unlock(pf_lock);
4754 lck_rw_done(pf_perim_lock);
4755 }
4756
4757 static void
4758 pf_attach_hooks(void)
4759 {
4760 ifnet_head_lock_shared();
4761 /*
4762 * Check against ifnet_addrs[] before proceeding, in case this
4763 * is called very early on, e.g. during dlil_init() before any
4764 * network interface is attached.
4765 */
4766 if (ifnet_addrs != NULL) {
4767 int i;
4768
4769 for (i = 0; i <= if_index; i++) {
4770 struct ifnet *ifp = ifindex2ifnet[i];
4771 if (ifp != NULL) {
4772 pfi_attach_ifnet(ifp);
4773 }
4774 }
4775 }
4776 ifnet_head_done();
4777 }
4778
#if 0
/*
 * Currently unused along with pfdetach().
 *
 * Detach PF from every interface that has a kif attached.
 *
 * NOTE(review): as previously written this did not compile -- the loop
 * counter 'i' was declared inside the loop body while being used in the
 * 'for' header (masked only because the block is under #if 0).  The
 * declaration now precedes the loop so the code builds if re-enabled.
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4798
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique. This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failures at compile time indicates duplicated ioctl values.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * Two identical case labels are a constraint violation, so any
	 * duplicated DIOC* value below fails the build.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}