/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/mcache.h>
#include <sys/queue.h>

#include <mach/vm_param.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#include <libkern/crypto/md5.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>

#include <net/if_pfsync.h>

#include <net/if_pflog.h>

#include <netinet/ip6.h>
#include <netinet/in_pcb.h>

#include <altq/altq.h>
static void		 pfdetach(void);
static int		 pfopen(dev_t, int, int, struct proc *);
static int		 pfclose(dev_t, int, int, struct proc *);
static int		 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static struct pf_pool	*pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
			    u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void		 pf_empty_pool(struct pf_palist *);

static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static void		 pf_altq_copyin(struct pf_altq *, struct pf_altq *);
static void		 pf_altq_copyout(struct pf_altq *, struct pf_altq *);

static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_ruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_rule *);
#ifndef NO_APPLE_EXTENSIONS
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *,
			    u_int8_t);
#else
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
#endif
static int		 pf_commit_rules(u_int32_t, int, char *);
static void		 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct proc *);
static void		 pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void		 pf_state_export(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_import(struct pfsync_state *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_pooladdr_copyin(struct pf_pooladdr *,
			    struct pf_pooladdr *);
static void		 pf_pooladdr_copyout(struct pf_pooladdr *,
			    struct pf_pooladdr *);

#define	PF_CDEV_MAJOR	(-1)

/*
 * Remaining cdevsw entries are standard xnu boilerplate filled in here
 * so the table parses; only the entries quoted above were recoverable
 * from the extraction.
 */
static struct cdevsw pf_cdevsw = {
	/* open */	pfopen,
	/* close */	pfclose,
	/* read */	eno_rdwrt,
	/* write */	eno_rdwrt,
	/* ioctl */	pfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	eno_select,
	/* mmap */	eno_mmap,
	/* strategy */	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};

static void pf_attach_hooks(void);
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

/*
 * These are the pf enabled reference counting variables
 */
static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule		 pf_default_rule;

static int		 pf_altq_running;
#define	TAGID_MAX	50000
static TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
static TAILQ_HEAD(pf_tags, pf_tagname)	pf_qids =
    TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);
static int		 pf_rtlabel_add(struct pf_addr_wrap *);
static void		 pf_rtlabel_remove(struct pf_addr_wrap *);
static void		 pf_rtlabel_copyout(struct pf_addr_wrap *);

static int		 pf_inet_hook(struct ifnet *, struct mbuf **, int);

static int		 pf_inet6_hook(struct ifnet *, struct mbuf **, int);

#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
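
/*
 * The pf ioctl structures carry user-space pointers, so their layout
 * differs between 32-bit and 64-bit callers; PF_USER_ADDR picks the
 * requested field out of whichever structure variant matches the
 * bitness of the current process.
 */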
#define	PF_USER_ADDR(a, s, f)					\
	(proc_is64bit(current_proc()) ?				\
	((struct s##_64 *)a)->f : ((struct s##_32 *)a)->f)

static lck_attr_t	*pf_perim_lock_attr;
static lck_grp_t	*pf_perim_lock_grp;
static lck_grp_attr_t	*pf_perim_lock_grp_attr;

static lck_attr_t	*pf_lock_attr;
static lck_grp_t	*pf_lock_grp;
static lck_grp_attr_t	*pf_lock_grp_attr;

struct thread		*pf_purge_thread;

extern void pfi_kifaddr_update(void *);
/* pf enable ref-counting helper functions */
static u_int64_t	 generate_token(void);
static int		 remove_token(struct pfioc_remove_token *);
static void		 invalidate_all_tokens(void);
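
/*
 * A token is an opaque 64-bit value handed back to callers of
 * DIOCSTARTREF.  Each live token holds one reference on the pf enable
 * count; generate_token() records the caller's pid, process name and a
 * timestamp so that DIOCGETSTARTERS can report who is keeping pf
 * enabled.
 */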
static u_int64_t
generate_token(void)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP,
	    M_WAITOK|M_ZERO);

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (new_token == NULL) {
		/* malloc failed! bail! */
		printf("%s: unable to allocate pf token structure!", __func__);
		return (0);
	}

	token_value = (u_int64_t)(uintptr_t)new_token;

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(current_proc());
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof (new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return (token_value);
}
static int
remove_token(struct pfioc_remove_token *tok)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		if (tok->token_value == entry->token.token_value) {
			SLIST_REMOVE(&token_list_head, entry,
			    pfioc_kernel_token, next);
			_FREE(entry, M_TEMP);
			nr_tokens--;
			return (0);	/* success */
		}
	}

	printf("pf : remove failure\n");
	return (ESRCH);	/* failure */
}
static void
invalidate_all_tokens(void)
{
	struct pfioc_kernel_token *entry, *tmp;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
		SLIST_REMOVE(&token_list_head, entry,
		    pfioc_kernel_token, next);
		_FREE(entry, M_TEMP);
	}
	nr_tokens = 0;
}
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
	    pf_perim_lock_grp_attr);
	pf_perim_lock_attr = lck_attr_alloc_init();
	pf_perim_lock = lck_rw_alloc_init(pf_perim_lock_grp,
	    pf_perim_lock_attr);

	pf_lock_grp_attr = lck_grp_attr_alloc_init();
	pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
	pf_lock_attr = lck_attr_alloc_init();
	pf_lock = lck_mtx_alloc_init(pf_lock_grp, pf_lock_attr);

	pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
#ifndef NO_APPLE_EXTENSIONS
	pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
#endif
	pool_init(&pf_altq_pl, sizeof (struct pf_altq), 0, 0, 0, "pfaltqpl",
	    NULL);
	pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);

	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	if (max_mem <= 256*1024*1024)
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
#ifndef NO_APPLE_EXTENSIONS
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
#endif
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&pf_status, sizeof (pf_status));
	pf_status.debug = PF_DEBUG_URGENT;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, 0), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	pf_attach_hooks();
}
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++)
		if (pf_begin_rules(&ticket, i, &r) == 0)
			pf_commit_rules(ticket, i, &r);

	if (pf_begin_altq(&ticket) == 0)
		pf_commit_altq(ticket);

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
		state->sync_flags = PFSTATE_NOSYNC;
	}
	pf_purge_expired_states(pf_status.states);

	pfsync_clear_states(pf_status.hostid, NULL);

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->states = 0;
		node->expire = 1;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof (pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++)
			if (pf_begin_rules(&ticket, i, anchor->name) == 0)
				pf_commit_rules(ticket, i, anchor->name);
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_altq_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule;
	int rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
	struct pf_pooladdr *mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}
static void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr *empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}
static void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
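
/*
 * Tag and queue names are interned into small reference-counted
 * integer IDs so that rules can carry a u_int16_t instead of a string;
 * the pf_tags and pf_qids lists are kept sorted by ID so the lowest
 * free slot can be reused.
 */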
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	u_int16_t new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof (tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (new_tagid);
}
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname *tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname *p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				_FREE(p, M_TEMP);
			}
			break;
		}
	}
}
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return (0);
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
u_int32_t
pf_qname2qid(char *qname)
{
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	tag2tagname(&pf_qids, (u_int16_t)qid, p);
}

void
pf_qid_unref(u_int32_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}
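
/*
 * altq configuration follows the same two-list transaction model as
 * rulesets: pf_begin_altq() empties the inactive list and issues a
 * ticket, DIOCADDALTQ fills it in, and pf_commit_altq() swaps it with
 * the active list, attaching the new disciplines and tearing down the
 * old ones.
 */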
static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq *altq;
	int error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}
)
823 struct pf_altq
*altq
;
826 if (!altqs_inactive_open
|| ticket
!= ticket_altqs_inactive
)
828 /* Purge the old altq list */
829 while ((altq
= TAILQ_FIRST(pf_altqs_inactive
)) != NULL
) {
830 TAILQ_REMOVE(pf_altqs_inactive
, altq
, entries
);
831 if (altq
->qname
[0] == 0) {
832 /* detach and destroy the discipline */
833 error
= altq_remove(altq
);
835 pf_qid_unref(altq
->qid
);
836 pool_put(&pf_altq_pl
, altq
);
838 altqs_inactive_open
= 0;
static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue *old_altqs;
	struct pf_altq *altq;
	int s, err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			if (pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}

	altqs_inactive_open = 0;
	return (error);
}
static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * when the discipline is no longer referenced, it was overridden
	 * by a new one. if so, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
static void
pf_altq_copyin(struct pf_altq *src, struct pf_altq *dst)
{
	bcopy(src, dst, sizeof (struct pf_altq));

	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->parent[sizeof (dst->parent) - 1] = '\0';
	dst->altq_disc = NULL;
	TAILQ_INIT(&dst->entries);
}
static void
pf_altq_copyout(struct pf_altq *src, struct pf_altq *dst)
{
	bcopy(src, dst, sizeof (struct pf_altq));

	dst->altq_disc = NULL;
	TAILQ_INIT(&dst->entries);
}
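
/*
 * Ruleset transactions: pf_begin_rules() clears the inactive list and
 * returns a ticket that must accompany every subsequent change; the
 * commit step later swaps the inactive and active lists in one
 * critical section, while pf_rollback_rules() simply discards the
 * inactive list.
 */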
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}
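
/*
 * These macros feed individual rule fields into an MD5 context; the
 * resulting digest (see pf_setup_pfsync_matching) lets pfsync peers
 * verify that they are running identical main rulesets.  Multi-byte
 * fields are hashed in network byte order so the checksum comes out
 * the same on hosts of either endianness.
 */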
#define	PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));	\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));	\
} while (0)
#ifndef NO_APPLE_EXTENSIONS
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
#else
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
#endif
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

#ifndef NO_APPLE_EXTENSIONS
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
#else
	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
#endif
}
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

#ifndef NO_APPLE_EXTENSIONS
	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
#else
	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
#endif
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return (0);
}
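
/*
 * Copying a rule in from user space must not trust any embedded
 * pointers or counters: strings are forcibly NUL-terminated and the
 * kernel-only fields (kif, overload table, pool list, refcounts) are
 * reset before the rule is linked anywhere.
 */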
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->label[sizeof (dst->label) - 1] = '\0';
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->qname[sizeof (dst->qname) - 1] = '\0';
	dst->pqname[sizeof (dst->pqname) - 1] = '\0';
	dst->tagname[sizeof (dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0';

	dst->cuid = kauth_cred_getuid(p->p_ucred);
	dst->cpid = p->p_pid;

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
}
static void
pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
{
	bcopy(src, dst, sizeof (struct pf_rule));

	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
}
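
/*
 * pf_state_export()/pf_state_import() translate between the kernel's
 * pf_state/pf_state_key pair and the flat pfsync_state wire format.
 * Times are made relative on export (creation age, seconds until
 * expiry) and rebuilt against the current clock on import.
 */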
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof (struct pfsync_state));

	/* copy from state key */
#ifndef NO_APPLE_EXTENSIONS
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext.addr = sk->ext.addr;
	sp->ext.xport = sk->ext.xport;
	sp->proto_variant = sk->proto_variant;
#else
	sp->lan.addr = sk->lan.addr;
	sp->lan.port = sk->lan.port;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.port = sk->gwy.port;
	sp->ext.addr = sk->ext.addr;
	sp->ext.port = sk->ext.port;
#endif
	sp->proto = sk->proto;
	sp->af = sk->af;
	sp->direction = sk->direction;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof (sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);

	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (s->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	if (sp->expire > secs)
		sp->expire -= secs;
	else
		sp->expire = 0;
}
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
#ifndef NO_APPLE_EXTENSIONS
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext.addr = sp->ext.addr;
	sk->ext.xport = sp->ext.xport;
	sk->proto_variant = sp->proto_variant;
#else
	sk->lan.addr = sp->lan.addr;
	sk->lan.port = sp->lan.port;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.port = sp->gwy.port;
	sk->ext.addr = sp->ext.addr;
	sk->ext.port = sp->ext.port;
#endif
	sk->proto = sp->proto;
	sk->af = sp->af;
	sk->direction = sp->direction;

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof (sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;

	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0)
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;

	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
static void
pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->ifname[sizeof (dst->ifname) - 1] = '\0';
	dst->kif = NULL;
}

static void
pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
	bcopy(src, dst, sizeof (struct pf_pooladdr));

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	dst->kif = NULL;
}
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof (caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum));
	return (0);
}
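
/*
 * pf_start()/pf_stop() are called with pf_lock held and flip
 * pf_is_enabled together with pf_status.running, which is what
 * pf_af_hook() checks before sending packets into pf_test()/pf_test6().
 */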
static void
pf_start(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
static void
pf_stop(void)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0)
		return (EPERM);

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCSETSTATUSIF:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETSTARTERS:
		case DIOCGETTIMEOUT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETASTATS:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return (EACCES);
		case DIOCGETRULE:
			if (((struct pfioc_rule *)addr)->action ==
			    PF_GET_CLR_CNTR)
				return (EACCES);
			break;
		default:
			return (EACCES);
		}

	if (flags & FWRITE)
		lck_rw_lock_exclusive(pf_perim_lock);
	else
		lck_rw_lock_shared(pf_perim_lock);

	lck_mtx_lock(pf_lock);

	switch (cmd) {
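
	/*
	 * DIOCSTART enables pf outright, while DIOCSTARTREF/DIOCSTOPREF
	 * manage it by reference: pf stays enabled until the last token
	 * is returned, and the counts are reconciled so a plain start
	 * cannot be undone by another process dropping its reference.
	 */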
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;
: /* returns a token */
1577 if (pf_purge_thread
== NULL
) {
1580 if ((*(u_int64_t
*)addr
= generate_token()) != 0) {
1581 if (pf_is_enabled
== 0) {
1584 pf_enabled_ref_count
++;
1585 VERIFY(pf_enabled_ref_count
!= 0);
1588 DPFPRINTF(PF_DEBUG_URGENT
,
1589 ("pf: unable to generate token\n"));
	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;
	case DIOCSTOPREF:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			if ((error = remove_token(
			    (struct pfioc_remove_token *)addr)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				// return currently held references
				((struct pfioc_remove_token *)addr)->refcount
				    = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}

			if (pf_enabled_ref_count == 0)
				pf_stop();
		}
		break;
	case DIOCGETSTARTERS: {
		struct pfioc_tokens *g_token = (struct pfioc_tokens *)addr;
		struct pfioc_token *tokens;
		struct pfioc_kernel_token *entry, *tmp;
		user_addr_t token_buf;
		int g_token_size_copy;
		char *ptr;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		g_token_size_copy = g_token->size;

		if (g_token->size == 0) {
			g_token->size = sizeof (struct pfioc_token) * nr_tokens;
			break;
		}

		token_buf = PF_USER_ADDR(addr, pfioc_tokens, pgt_buf);
		tokens = _MALLOC(sizeof (struct pfioc_token) * nr_tokens,
		    M_TEMP, M_WAITOK);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			if ((unsigned)g_token_size_copy
			    < sizeof (struct pfioc_token))
				break;	/* no more buffer space left */

			((struct pfioc_token *)(ptr))->token_value =
			    entry->token.token_value;
			((struct pfioc_token *)(ptr))->timestamp =
			    entry->token.timestamp;
			((struct pfioc_token *)(ptr))->pid = entry->token.pid;
			memcpy(((struct pfioc_token *)(ptr))->proc_name,
			    entry->token.proc_name, PFTOK_PROCNAME_LEN);
			ptr += sizeof (struct pfioc_token);

			g_token_size_copy -= sizeof (struct pfioc_token);
		}

		if (g_token_size_copy < g_token->size) {
			error = copyout(tokens, token_buf,
			    g_token->size - g_token_size_copy);
		}

		g_token->size -= g_token_size_copy;
		_FREE(tokens, M_TEMP);
		break;
	}
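
	/*
	 * DIOCADDRULE appends to the inactive list only: both the ruleset
	 * ticket and the pool ticket must match, and every symbolic
	 * reference in the rule (interface, queues, tags, tables, anchor)
	 * is resolved before the rule becomes visible to a later commit.
	 */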
	case DIOCADDRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_ruleset *ruleset;
		struct pf_rule *rule, *tail;
		struct pf_pooladdr *apa;
		int rs_num;

		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* !INET */
#if !INET6
		if (rule->af == AF_INET6) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* !INET6 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;
		if (rule->ifname[0]) {
			rule->kif = pfi_kif_get(rule->ifname);
			if (rule->kif == NULL) {
				pool_put(&pf_rule_pl, rule);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
		}

		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
			else if (rule->pqname[0] != 0) {
				if ((rule->pqid =
				    pf_qname2qid(rule->pqname)) == 0)
					error = EBUSY;
			} else
				rule->pqid = rule->qid;
		}
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;
		if (rule->match_tagname[0])
			if ((rule->match_tag =
			    pf_tagname2tag(rule->match_tagname)) == 0)
				error = EBUSY;
		if (rule->rt && !rule->direction)
			error = EINVAL;
		if (rule->logif >= PFLOGIFS_MAX)
			error = EINVAL;
		if (pf_rtlabel_add(&rule->src.addr) ||
		    pf_rtlabel_add(&rule->dst.addr))
			error = EBUSY;
		if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
			error = EINVAL;
		if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->src.addr))
			error = EINVAL;
		if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
			error = EINVAL;
		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
			error = EINVAL;
		TAILQ_FOREACH(apa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &apa->addr))
				error = EINVAL;

		if (rule->overload_tblname[0]) {
			if ((rule->overload_tbl = pfr_attach_table(ruleset,
			    rule->overload_tblname)) == NULL)
				error = EINVAL;
			else
				rule->overload_tbl->pfrkt_flags |=
				    PFR_TFLAG_ACTIVE;
		}

		pf_mv_pool(&pf_pabuf, &rule->rpool.list);
		if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
		    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
		    (rule->rt > PF_FASTROUTE)) &&
		    (TAILQ_FIRST(&rule->rpool.list) == NULL))
			error = EINVAL;

		if (error) {
			pf_rm_rule(NULL, rule);
			break;
		}
		rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
		rule->evaluations = rule->packets[0] = rule->packets[1] =
		    rule->bytes[0] = rule->bytes[1] = 0;
		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		break;
	}
	case DIOCGETRULES: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_ruleset *ruleset;
		struct pf_rule *tail;
		int rs_num;

		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail)
			pr->nr = tail->nr + 1;
		else
			pr->nr = 0;
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}
	case DIOCGETRULE: {
		struct pfioc_rule *pr = (struct pfioc_rule *)addr;
		struct pf_ruleset *ruleset;
		struct pf_rule *rule;
		int rs_num, i;

		pr->anchor[sizeof (pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		pf_rule_copyout(rule, &pr->rule);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i)
			if (rule->skip[i].ptr == NULL)
				pr->rule.skip[i].nr = -1;
			else
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}
: {
1910 struct pfioc_rule
*pcr
= (struct pfioc_rule
*)addr
;
1911 struct pf_ruleset
*ruleset
;
1912 struct pf_rule
*oldrule
= NULL
, *newrule
= NULL
;
1916 if (!(pcr
->action
== PF_CHANGE_REMOVE
||
1917 pcr
->action
== PF_CHANGE_GET_TICKET
) &&
1918 pcr
->pool_ticket
!= ticket_pabuf
) {
1923 if (pcr
->action
< PF_CHANGE_ADD_HEAD
||
1924 pcr
->action
> PF_CHANGE_GET_TICKET
) {
1928 pcr
->anchor
[sizeof (pcr
->anchor
) - 1] = '\0';
1929 pcr
->anchor_call
[sizeof (pcr
->anchor_call
) - 1] = '\0';
1930 ruleset
= pf_find_ruleset(pcr
->anchor
);
1931 if (ruleset
== NULL
) {
1935 rs_num
= pf_get_ruleset_number(pcr
->rule
.action
);
1936 if (rs_num
>= PF_RULESET_MAX
) {
1941 if (pcr
->action
== PF_CHANGE_GET_TICKET
) {
1942 pcr
->ticket
= ++ruleset
->rules
[rs_num
].active
.ticket
;
1946 ruleset
->rules
[rs_num
].active
.ticket
) {
1950 if (pcr
->rule
.return_icmp
>> 8 > ICMP_MAXTYPE
) {
1956 if (pcr
->action
!= PF_CHANGE_REMOVE
) {
1957 newrule
= pool_get(&pf_rule_pl
, PR_WAITOK
);
1958 if (newrule
== NULL
) {
1962 pf_rule_copyin(&pcr
->rule
, newrule
, p
);
1964 if (newrule
->af
== AF_INET
) {
1965 pool_put(&pf_rule_pl
, newrule
);
1966 error
= EAFNOSUPPORT
;
1971 if (newrule
->af
== AF_INET6
) {
1972 pool_put(&pf_rule_pl
, newrule
);
1973 error
= EAFNOSUPPORT
;
1977 if (newrule
->ifname
[0]) {
1978 newrule
->kif
= pfi_kif_get(newrule
->ifname
);
1979 if (newrule
->kif
== NULL
) {
1980 pool_put(&pf_rule_pl
, newrule
);
1984 pfi_kif_ref(newrule
->kif
, PFI_KIF_REF_RULE
);
1986 newrule
->kif
= NULL
;
1990 if (newrule
->qname
[0] != 0) {
1992 pf_qname2qid(newrule
->qname
)) == 0)
1994 else if (newrule
->pqname
[0] != 0) {
1995 if ((newrule
->pqid
=
1996 pf_qname2qid(newrule
->pqname
)) == 0)
1999 newrule
->pqid
= newrule
->qid
;
2002 if (newrule
->tagname
[0])
2004 pf_tagname2tag(newrule
->tagname
)) == 0)
2006 if (newrule
->match_tagname
[0])
2007 if ((newrule
->match_tag
= pf_tagname2tag(
2008 newrule
->match_tagname
)) == 0)
2010 if (newrule
->rt
&& !newrule
->direction
)
2015 if (newrule
->logif
>= PFLOGIFS_MAX
)
2018 if (pf_rtlabel_add(&newrule
->src
.addr
) ||
2019 pf_rtlabel_add(&newrule
->dst
.addr
))
2021 if (pfi_dynaddr_setup(&newrule
->src
.addr
, newrule
->af
))
2023 if (pfi_dynaddr_setup(&newrule
->dst
.addr
, newrule
->af
))
2025 if (pf_tbladdr_setup(ruleset
, &newrule
->src
.addr
))
2027 if (pf_tbladdr_setup(ruleset
, &newrule
->dst
.addr
))
2029 if (pf_anchor_setup(newrule
, ruleset
, pcr
->anchor_call
))
2031 TAILQ_FOREACH(pa
, &pf_pabuf
, entries
)
2032 if (pf_tbladdr_setup(ruleset
, &pa
->addr
))
2035 if (newrule
->overload_tblname
[0]) {
2036 if ((newrule
->overload_tbl
= pfr_attach_table(
2037 ruleset
, newrule
->overload_tblname
)) ==
2041 newrule
->overload_tbl
->pfrkt_flags
|=
2045 pf_mv_pool(&pf_pabuf
, &newrule
->rpool
.list
);
2046 if (((((newrule
->action
== PF_NAT
) ||
2047 (newrule
->action
== PF_RDR
) ||
2048 (newrule
->action
== PF_BINAT
) ||
2049 (newrule
->rt
> PF_FASTROUTE
)) &&
2050 !newrule
->anchor
)) &&
2051 (TAILQ_FIRST(&newrule
->rpool
.list
) == NULL
))
2055 pf_rm_rule(NULL
, newrule
);
2058 newrule
->rpool
.cur
= TAILQ_FIRST(&newrule
->rpool
.list
);
2059 newrule
->evaluations
= 0;
2060 newrule
->packets
[0] = newrule
->packets
[1] = 0;
2061 newrule
->bytes
[0] = newrule
->bytes
[1] = 0;
2063 pf_empty_pool(&pf_pabuf
);
2065 if (pcr
->action
== PF_CHANGE_ADD_HEAD
)
2066 oldrule
= TAILQ_FIRST(
2067 ruleset
->rules
[rs_num
].active
.ptr
);
2068 else if (pcr
->action
== PF_CHANGE_ADD_TAIL
)
2069 oldrule
= TAILQ_LAST(
2070 ruleset
->rules
[rs_num
].active
.ptr
, pf_rulequeue
);
2072 oldrule
= TAILQ_FIRST(
2073 ruleset
->rules
[rs_num
].active
.ptr
);
2074 while ((oldrule
!= NULL
) && (oldrule
->nr
!= pcr
->nr
))
2075 oldrule
= TAILQ_NEXT(oldrule
, entries
);
2076 if (oldrule
== NULL
) {
2077 if (newrule
!= NULL
)
2078 pf_rm_rule(NULL
, newrule
);
2084 if (pcr
->action
== PF_CHANGE_REMOVE
) {
2085 pf_rm_rule(ruleset
->rules
[rs_num
].active
.ptr
, oldrule
);
2086 ruleset
->rules
[rs_num
].active
.rcount
--;
2088 if (oldrule
== NULL
)
2090 ruleset
->rules
[rs_num
].active
.ptr
,
2092 else if (pcr
->action
== PF_CHANGE_ADD_HEAD
||
2093 pcr
->action
== PF_CHANGE_ADD_BEFORE
)
2094 TAILQ_INSERT_BEFORE(oldrule
, newrule
, entries
);
2097 ruleset
->rules
[rs_num
].active
.ptr
,
2098 oldrule
, newrule
, entries
);
2099 ruleset
->rules
[rs_num
].active
.rcount
++;
2103 TAILQ_FOREACH(oldrule
,
2104 ruleset
->rules
[rs_num
].active
.ptr
, entries
)
2107 ruleset
->rules
[rs_num
].active
.ticket
++;
2109 pf_calc_skip_steps(ruleset
->rules
[rs_num
].active
.ptr
);
2110 pf_remove_if_empty_ruleset(ruleset
);
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		int killed = 0;

		psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0';
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);

			if (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
			    s->kif->pfik_name) == 0) {
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
		break;
	}
	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext;
			} else {
				src = &sk->ext;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af) &&
#ifndef NO_APPLE_EXTENSIONS
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
#else
			    (psk->psk_src.port_op == 0 ||
			    pf_match_port(psk->psk_src.port_op,
			    psk->psk_src.port[0], psk->psk_src.port[1],
			    src->port)) &&
			    (psk->psk_dst.port_op == 0 ||
			    pf_match_port(psk->psk_dst.port_op,
			    psk->psk_dst.port[0], psk->psk_dst.port[1],
			    dst->port)) &&
#endif
			    (!psk->psk_ifname[0] || strcmp(psk->psk_ifname,
			    s->kif->pfik_name) == 0)) {
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = killed;
		break;
	}
	case DIOCADDSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		if (sp->timeout >= PFTM_MAX &&
		    sp->timeout != PFTM_UNTIL_PACKET) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof (struct pf_state));
		if ((sk = pf_alloc_state_key(s)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
#ifndef NO_APPLE_EXTENSIONS
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
#endif
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}
	case DIOCGETSTATE: {
		struct pfioc_state *ps = (struct pfioc_state *)addr;
		struct pf_state *s;
		struct pf_state_cmp id_key;

		bcopy(ps->state.id, &id_key.id, sizeof (id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}
	case DIOCGETSTATES: {
		struct pfioc_states *ps = (struct pfioc_states *)addr;
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;

		if (ps->ps_len == 0) {
			nr = pf_status.states;
			ps->ps_len = sizeof (struct pfsync_state) * nr;
			break;
		}

		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_states, ps_buf);

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr + 1) * sizeof (*pstore) >
				    (unsigned)ps->ps_len)
					break;

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof (*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof (*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		ps->ps_len = sizeof (struct pfsync_state) * nr;

		_FREE(pstore, M_TEMP);
		break;
	}
	case DIOCGETSTATUS: {
		struct pf_status *s = (struct pf_status *)addr;
		bcopy(&pf_status, s, sizeof (struct pf_status));
		pfi_update_status(s->ifname, s);
		break;
	}
	case DIOCSETSTATUSIF: {
		struct pfioc_if *pi = (struct pfioc_if *)addr;

		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}
	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof (pf_status.counters));
		bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
		bzero(pf_status.scounters, sizeof (pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname)
			pfi_update_status(pf_status.ifname, NULL);
		break;
	}

	case DIOCNATLOOK: {		/* struct pfioc_natlook */
		struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.af = pnl->af;
		key.proto = pnl->proto;
#ifndef NO_APPLE_EXTENSIONS
		key.proto_variant = pnl->proto_variant;
#endif

		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
#ifndef NO_APPLE_EXTENSIONS
		    (!pnl->dxport.port || !pnl->sxport.port)))
#else
		    (!pnl->dport || !pnl->sport)))
#endif
			error = EINVAL;
		else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
				memcpy(&key.ext.xport, &pnl->dxport,
				    sizeof (key.ext.xport));
#else
				key.ext.port = pnl->dport;
#endif
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof (key.gwy.xport));
#else
				key.gwy.port = pnl->sport;
#endif
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof (key.lan.xport));
#else
				key.lan.port = pnl->dport;
#endif
				PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
#ifndef NO_APPLE_EXTENSIONS
				memcpy(&key.ext.xport, &pnl->sxport,
				    sizeof (key.ext.xport));
#else
				key.ext.port = pnl->sport;
#endif
				state = pf_find_state_all(&key, PF_OUT, &m);
			}

			if (m > 1)
				error = E2BIG;	/* more than one state */
			else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af);
#ifndef NO_APPLE_EXTENSIONS
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof (pnl->rsxport));
#else
					pnl->rsport = sk->lan.port;
#endif
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
#ifndef NO_APPLE_EXTENSIONS
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof (pnl->rdxport));
#else
					pnl->rdport = pnl->dport;
#endif
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af);
#ifndef NO_APPLE_EXTENSIONS
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof (pnl->rdxport));
#else
					pnl->rdport = sk->gwy.port;
#endif
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
#ifndef NO_APPLE_EXTENSIONS
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof (pnl->rsxport));
#else
					pnl->rsport = pnl->sport;
#endif
				}
			} else
				error = ENOENT;
		}
		break;
	}
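
	/*
	 * DIOCNATLOOK is what lets a userland proxy recover the original
	 * destination of a connection that was rdr'ed to it: the caller
	 * fills in the endpoints as it sees them and, after the reversed
	 * lookup above, gets the pre-NAT peer back in rdaddr/rsaddr.
	 * Sketch of the typical inbound-proxy query (illustrative only;
	 * Apple builds use the sxport/dxport fields where stock pf uses
	 * sport/dport):
	 *
	 *	struct pfioc_natlook nl;
	 *	bzero(&nl, sizeof (nl));
	 *	nl.af = AF_INET;
	 *	nl.proto = IPPROTO_TCP;
	 *	nl.direction = PF_OUT;
	 *	nl.saddr.v4 = client_sin.sin_addr;	// peer of the proxy
	 *	nl.daddr.v4 = local_sin.sin_addr;	// proxy's local addr
	 *	nl.sxport.port = client_sin.sin_port;
	 *	nl.dxport.port = local_sin.sin_port;
	 *	if (ioctl(dev, DIOCNATLOOK, &nl) == 0)
	 *		...	// nl.rdaddr/nl.rdxport: original destination
	 */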

	case DIOCSETTIMEOUT: {		/* struct pfioc_tm */
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;
		int old;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
		    pt->seconds < 0) {
			error = EINVAL;
			goto fail;
		}
		old = pf_default_rule.timeout[pt->timeout];
		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
			pt->seconds = 1;
		pf_default_rule.timeout[pt->timeout] = pt->seconds;
		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
			wakeup(pf_purge_thread_fn);
		pt->seconds = old;
		break;
	}

	case DIOCGETTIMEOUT: {		/* struct pfioc_tm */
		struct pfioc_tm *pt = (struct pfioc_tm *)addr;

		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
			error = EINVAL;
			goto fail;
		}
		pt->seconds = pf_default_rule.timeout[pt->timeout];
		break;
	}

	case DIOCGETLIMIT: {		/* struct pfioc_limit */
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
			error = EINVAL;
			goto fail;
		}
		pl->limit = pf_pool_limits[pl->index].limit;
		break;
	}

	case DIOCSETLIMIT: {		/* struct pfioc_limit */
		struct pfioc_limit *pl = (struct pfioc_limit *)addr;
		int old_limit;

		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
		    pf_pool_limits[pl->index].pp == NULL) {
			error = EINVAL;
			goto fail;
		}
		pool_sethardlimit(pf_pool_limits[pl->index].pp,
		    pl->limit, NULL, 0);
		old_limit = pf_pool_limits[pl->index].limit;
		pf_pool_limits[pl->index].limit = pl->limit;
		pl->limit = old_limit;
		break;
	}

	case DIOCSETDEBUG: {		/* u_int32_t */
		u_int32_t *level = (u_int32_t *)addr;

		pf_status.debug = *level;
		break;
	}
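
	/*
	 * PFTM_INTERVAL is the purge-thread wakeup interval rather than
	 * a state lifetime, which is why shortening it above immediately
	 * wakes pf_purge_thread_fn: the thread may still be sleeping on
	 * the old, longer interval.  These are the same knobs pf.conf
	 * sets with e.g. "set timeout interval 10".
	 */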

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCSTARTALTQ: {
		struct pf_altq *altq;

		/* enable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_enable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 1;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
		break;
	}

	case DIOCSTOPALTQ: {
		struct pf_altq *altq;

		/* disable all altq interfaces on active list */
		TAILQ_FOREACH(altq, pf_altqs_active, entries) {
			if (altq->qname[0] == 0) {
				error = pf_disable_altq(altq);
				if (error != 0)
					break;
			}
		}
		if (error == 0)
			pf_altq_running = 0;
		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
		break;
	}

	case DIOCADDALTQ: {		/* struct pfioc_altq */
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq, *a;

		if (pa->ticket != ticket_altqs_inactive) {
			error = EBUSY;
			break;
		}
		altq = pool_get(&pf_altq_pl, PR_WAITOK);
		if (altq == NULL) {
			error = ENOMEM;
			break;
		}
		pf_altq_copyin(&pa->altq, altq);

		/*
		 * if this is for a queue, find the discipline and
		 * copy the necessary fields
		 */
		if (altq->qname[0] != 0) {
			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
				error = EBUSY;
				pool_put(&pf_altq_pl, altq);
				break;
			}
			altq->altq_disc = NULL;
			TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
				if (strncmp(a->ifname, altq->ifname,
				    IFNAMSIZ) == 0 && a->qname[0] == 0) {
					altq->altq_disc = a->altq_disc;
					break;
				}
			}
		}

		error = altq_add(altq);
		if (error) {
			pool_put(&pf_altq_pl, altq);
			break;
		}

		TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
		pf_altq_copyout(altq, &pa->altq);
		break;
	}

	case DIOCGETALTQS: {		/* struct pfioc_altq */
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;

		pa->nr = 0;
		TAILQ_FOREACH(altq, pf_altqs_active, entries)
			pa->nr++;
		pa->ticket = ticket_altqs_active;
		break;
	}

	case DIOCGETALTQ: {		/* struct pfioc_altq */
		struct pfioc_altq *pa = (struct pfioc_altq *)addr;
		struct pf_altq *altq;
		u_int32_t nr;

		if (pa->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pa->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		pf_altq_copyout(altq, &pa->altq);
		break;
	}

	case DIOCCHANGEALTQ:
		/* CHANGEALTQ not supported yet! */
		error = ENODEV;
		break;

	case DIOCGETQSTATS: {		/* struct pfioc_qstats */
		struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
		struct pf_altq *altq;
		u_int32_t nr;
		int nbytes;

		if (pq->ticket != ticket_altqs_active) {
			error = EBUSY;
			break;
		}
		nbytes = pq->nbytes;
		nr = 0;
		altq = TAILQ_FIRST(pf_altqs_active);
		while ((altq != NULL) && (nr < pq->nr)) {
			altq = TAILQ_NEXT(altq, entries);
			nr++;
		}
		if (altq == NULL) {
			error = EBUSY;
			break;
		}
		error = altq_getqstats(altq, pq->buf, &nbytes);
		if (error == 0) {
			pq->scheduler = altq->scheduler;
			pq->nbytes = nbytes;
		}
		break;
	}
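
	/*
	 * The ALTQ ioctls mirror the ruleset transaction model: queues
	 * are staged on pf_altqs_inactive under ticket_altqs_inactive
	 * (DIOCADDALTQ above) and become live only when the lists are
	 * swapped at commit time, after which DIOCGETALTQS/DIOCGETALTQ
	 * walk the active list under ticket_altqs_active.
	 */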

	case DIOCBEGINADDRS: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#ifndef INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
#ifndef INET6
		if (pp->af == AF_INET6) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET6 */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;

		pp->nr = 0;
		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
			pp->nr++;
		break;
	}

	case DIOCGETADDR: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
		u_int32_t nr = 0;

		pp->anchor[sizeof (pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}
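
	/*
	 * Address-pool construction is also ticketed: DIOCBEGINADDRS
	 * empties pf_pabuf and hands out a fresh ticket, successive
	 * DIOCADDADDR calls must quote that ticket, and the buffered
	 * addresses are attached to a rule when the rule is added.
	 * This is how pfctl builds the round-robin/random pools used
	 * by nat and rdr rules.
	 */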

	case DIOCCHANGEADDR: {		/* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof (pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#ifndef INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
#ifndef INET6
			if (pca->af == AF_INET6) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET6 */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else
				newpa->kif = NULL;
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_ADD_HEAD)
			oldpa = TAILQ_FIRST(&pool->list);
		else if (pca->action == PF_CHANGE_ADD_TAIL)
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL)
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE)
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			else
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}
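
	/*
	 * PF_CHANGE_ADD_HEAD/_TAIL/_BEFORE/_AFTER/_REMOVE thus give
	 * userland positional surgery on a live pool without reloading
	 * the rule; note that pool->cur and the round-robin counter are
	 * reset above after every edit, so address selection restarts
	 * from the head of the list.
	 */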

	case DIOCGETRULESETS: {		/* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		pr->path[sizeof (pr->path) - 1] = '\0';
		pr->name[sizeof (pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL)
					pr->nr++;
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {		/* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof (pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
				if (anchor->parent == NULL && nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
				if (nr++ == pr->nr) {
					strlcpy(pr->name, anchor->name,
					    sizeof (pr->name));
					break;
				}
		}
		if (!pr->name[0])
			error = EBUSY;
		break;
	}
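
	/*
	 * Anchors are enumerated with the pair of ioctls above: a
	 * DIOCGETRULESETS call returns the number of children under a
	 * path, then DIOCGETRULESET is issued once per index.
	 * Illustrative userland loop (error handling omitted):
	 *
	 *	struct pfioc_ruleset prs;
	 *	u_int32_t i, n;
	 *	bzero(&prs, sizeof (prs));
	 *	strlcpy(prs.path, "", sizeof (prs.path)); // main ruleset
	 *	(void) ioctl(dev, DIOCGETRULESETS, &prs);
	 *	n = prs.nr;
	 *	for (i = 0; i < n; i++) {
	 *		prs.nr = i;
	 *		(void) ioctl(dev, DIOCGETRULESET, &prs);
	 *		printf("anchor \"%s\"\n", prs.name);
	 *	}
	 */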

	case DIOCRCLRTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(buf, io->pfrio_size,
		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(buf, io->pfrio_size,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTABLES: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_get_tables(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_get_tstats(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRTSTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(buf, io->pfrio_size,
		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETTFLAGS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(buf, io->pfrio_size,
		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;

		if (io->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRADDADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_add_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRDELADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_del_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRSETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_set_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;
	}

	case DIOCRGETADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_get_addrs(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRGETASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_get_astats(&io->pfrio_table, buf,
		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRCLRASTATS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_clr_astats(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRTSTADDRS: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_tst_addrs(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;
	}

	case DIOCRINADEFINE: {
		struct pfioc_table *io = (struct pfioc_table *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);

		if (io->pfrio_esize != sizeof (struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io->pfrio_table);
		error = pfr_ina_define(&io->pfrio_table, buf,
		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;
	}
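
	/*
	 * Every DIOCR* table handler above begins with the same esize
	 * check: userland must state the size of the element type it is
	 * passing, and the kernel refuses (ENODEV) when that disagrees
	 * with its own layout, guarding against 32/64-bit or version
	 * skew in the pfioc_table protocol.  PFR_FLAG_USERIOCTL simply
	 * tags the request as coming from pfioctl rather than from
	 * in-kernel callers of the pfr_* routines.
	 */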

	case DIOCOSFPADD: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_add(io);
		break;
	}

	case DIOCOSFPGET: {
		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
		error = pf_osfp_get(io);
		break;
	}

	case DIOCXBEGIN: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		user_addr_t buf;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_trans, array);
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_begin_altq(&ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;
				}
				break;
			}
			if (copyout(ioe, buf, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}
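
	/*
	 * DIOCXBEGIN opens one transaction element per ruleset, table
	 * or altq list to be replaced and returns a ticket for each,
	 * which DIOCXCOMMIT or DIOCXROLLBACK must present later.  This
	 * is how "pfctl -f pf.conf" loads an entire configuration
	 * atomically: begin, push the new rules and tables against the
	 * tickets, then commit.
	 */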

	case DIOCXROLLBACK: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		user_addr_t buf;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_trans, array);
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if ((error = pf_rollback_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans *io = (struct pfioc_trans *)addr;
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf, buf;
		int i;

		if (io->esize != sizeof (*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
		table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
		buf = _buf = PF_USER_ADDR(addr, pfioc_trans, array);
		/* first makes sure everything will succeed */
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				if (ioe->anchor[0]) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				if (!altqs_inactive_open || ioe->ticket !=
				    ticket_altqs_inactive) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			case PF_RULESET_TABLE:
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
			if (copyin(buf, ioe, sizeof (*ioe))) {
				_FREE(table, M_TEMP);
				_FREE(ioe, M_TEMP);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof (ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				if ((error = pf_commit_altq(ioe->ticket))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof (*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof (table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					_FREE(table, M_TEMP);
					_FREE(ioe, M_TEMP);
					goto fail;	/* really bad */
				}
				break;
			}
		}
		_FREE(table, M_TEMP);
		_FREE(ioe, M_TEMP);
		break;
	}
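
	/*
	 * Note the two-pass structure of DIOCXCOMMIT above: the first
	 * loop only validates every ticket so that the second loop,
	 * which performs the actual swaps, should never fail.  A
	 * failure there ("really bad") can leave the configuration
	 * partially committed, which is precisely what the
	 * pre-validation pass exists to avoid.
	 */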

	case DIOCGETSRCNODES: {		/* struct pfioc_src_nodes */
		struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space = psn->psn_len;

		if (space == 0) {
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
				nr++;
			psn->psn_len = sizeof (struct pf_src_node) * nr;
			break;
		}

		pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
		buf = PF_USER_ADDR(addr, pfioc_src_nodes, psn_buf);

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			if ((nr + 1) * sizeof (*pstore) >
			    (unsigned)psn->psn_len)
				break;

			bcopy(n, pstore, sizeof (*pstore));
			if (n->rule.ptr != NULL)
				pstore->rule.nr = n->rule.ptr->nr;
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs)
				pstore->expire -= secs;
			else
				pstore->expire = 0;

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds)
				pstore->conn_rate.count = 0;
			else
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;

			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;

			error = copyout(pstore, buf, sizeof (*pstore));
			if (error) {
				_FREE(pstore, M_TEMP);
				goto fail;
			}
			buf += sizeof (*pstore);
			nr++;
		}
		psn->psn_len = sizeof (struct pf_src_node) * nr;

		_FREE(pstore, M_TEMP);
		break;
	}
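
	/*
	 * The conn_rate adjustment above decays the counted connections
	 * linearly over the measurement window.  For example, with
	 * count = 30, seconds = 10 and diff = 4 seconds since the last
	 * connection, the exported count is 30 - (30 * 4 / 10) = 18;
	 * once diff reaches the full window the reported count drops
	 * to 0.
	 */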

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		struct pfioc_src_node_kill *psnk =
		    (struct pfioc_src_node_kill *)addr;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn)
							s->src_node = NULL;
						if (s->nat_src_node == sn)
							s->nat_src_node = NULL;
					}
					sn->states = 0;
				}
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0)
			pf_purge_expired_src_nodes();

		psnk->psnk_af = killed;
		break;
	}

	case DIOCSETHOSTID: {		/* u_int32_t */
		u_int32_t *hid = (u_int32_t *)addr;

		if (*hid == 0)
			pf_status.hostid = random();
		else
			pf_status.hostid = *hid;
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;
		user_addr_t buf = PF_USER_ADDR(addr, pfioc_iface, pfiio_buffer);

		/* esize must be that of the user space version of pfi_kif */
		if (io->pfiio_esize != sizeof (struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_get_ifaces(io->pfiio_name, buf, &io->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		struct pfioc_iface *io = (struct pfioc_iface *)addr;

		io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	return (error);
}

int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input)
{
	int error = 0, reentry;
	struct thread *curthread = current_thread();
	struct mbuf *nextpkt;

	reentry = (ifp->if_pf_curthread == curthread);
	if (!reentry) {
		lck_rw_lock_shared(pf_perim_lock);
		lck_mtx_lock(pf_lock);
		ifp->if_pf_curthread = curthread;
	}

	if (mppn != NULL && *mppn != NULL)
		VERIFY(*mppn == *mp);
	if ((nextpkt = (*mp)->m_nextpkt) != NULL)
		(*mp)->m_nextpkt = NULL;

	switch (af) {
	case AF_INET:
		error = pf_inet_hook(ifp, mp, input);
		break;
	case AF_INET6:
		error = pf_inet6_hook(ifp, mp, input);
		break;
	default:
		break;
	}

	if (nextpkt != NULL) {
		if (*mp != NULL) {
			struct mbuf *m = *mp;
			while (m->m_nextpkt != NULL)
				m = m->m_nextpkt;
			m->m_nextpkt = nextpkt;
		} else
			*mp = nextpkt;
	}
	if (mppn != NULL && *mppn != NULL)
		*mppn = *mp;

	if (!reentry) {
		ifp->if_pf_curthread = NULL;
		lck_mtx_unlock(pf_lock);
		lck_rw_done(pf_perim_lock);
	}

	return (error);
}
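
/*
 * The if_pf_curthread marker gives pf_af_hook cheap reentrancy
 * detection: when PF itself re-injects a packet on the same thread
 * (e.g. during route-to/reply-to processing), the hook finds its own
 * thread already registered on the interface and skips taking
 * pf_perim_lock/pf_lock a second time, avoiding self-deadlock.
 */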

static int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	struct mbuf *m = *mp;
	int error = 0;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else
			error = ENOBUFS;
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return (error);
}
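
/*
 * The flags/mask computation above amounts to "checksums the stack
 * deferred minus checksums this interface can do in hardware":
 * csum_flags holds the deferred work, IF_HWASSIST_CSUM_FLAGS
 * (if_hwassist) what the NIC offloads, and only the difference is
 * finalized in software before pf_test() inspects or rewrites the
 * packet.
 */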

int
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in6_delayed_cksum(*mp, sizeof (struct ip6_hdr));
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6(input ? PF_IN : PF_OUT, ifp, mp, NULL) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else
			error = ENOBUFS;
	}
	return (error);
}

void
pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd)
{
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCDIFADDR:
	case SIOCAIFADDR_IN6_32:
	case SIOCAIFADDR_IN6_64:
	case SIOCDIFADDR_IN6:
		if (ifp->if_pf_kif != NULL)
			pfi_kifaddr_update(ifp->if_pf_kif);
		break;
	default:
		panic("%s: unexpected ioctl %lu", __func__, cmd);
		/* NOTREACHED */
	}

	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
}

/*
 * Caller acquires dlil lock as writer (exclusive)
 */
void
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	if (attach)
		pfi_attach_ifnet(ifp);
	else
		pfi_detach_ifnet(ifp);
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
}

void
pf_attach_hooks(void)
{
	ifnet_head_lock_shared();
	/*
	 * Check against ifnet_addrs[] before proceeding, in case this
	 * is called very early on, e.g. during dlil_init() before any
	 * network interface is attached.
	 */
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL)
				pfi_attach_ifnet(ifp);
		}
	}
	ifnet_head_done();
}

/* currently unused along with pfdetach() */
void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}