/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define COPYIN(from, to, size, flags) \
((flags & PFR_FLAG_USERIOCTL) ? \
- copyin(CAST_USER_ADDR_T(from), (to), (size)) : \
- (bcopy((from), (to), (size)), 0))
+ copyin((from), (to), (size)) : \
+ (bcopy((void *)(uintptr_t)(from), (to), (size)), 0))
#define COPYOUT(from, to, size, flags) \
((flags & PFR_FLAG_USERIOCTL) ? \
- copyout((from), CAST_USER_ADDR_T(to), (size)) : \
- (bcopy((from), (to), (size)), 0))
+ copyout((from), (to), (size)) : \
+ (bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
#define FILLIN_SIN(sin, addr) \
do { \
PFRW_DYNADDR_UPDATE
} pfrw_op;
union {
- struct pfr_addr *pfrw1_addr;
- struct pfr_astats *pfrw1_astats;
+ user_addr_t pfrw1_addr;
+ user_addr_t pfrw1_astats;
struct pfr_kentryworkq *pfrw1_workq;
struct pfr_kentry *pfrw1_kentry;
struct pfi_dynaddr *pfrw1_dyn;
struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
-static void pfr_reset_feedback(struct pfr_addr *, int, int);
+static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
}
int
-pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *nadd, int flags)
{
struct pfr_ktable *kt, *tmpkt;
struct pfr_kentry *p, *q;
struct pfr_addr ad;
int i, rv, xadd = 0;
- u_int64_t tzero = pf_time_second();
+ user_addr_t addr = _addr;
+ u_int64_t tzero = pf_calendar_time_second();
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_FEEDBACK);
if (tmpkt == NULL)
return (ENOMEM);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
}
}
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
pfr_clean_node_mask(tmpkt, &workq);
pfr_clean_node_mask(tmpkt, &workq);
pfr_destroy_kentries(&workq);
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
pfr_destroy_ktable(tmpkt, 0);
return (rv);
}
int
-pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *ndel, int flags)
{
struct pfr_ktable *kt;
struct pfr_kentryworkq workq;
struct pfr_kentry *p;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xdel = 0, log = 1;
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
pfr_mark_addrs(kt);
} else {
/* iterate over addresses to delete */
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
return (EFAULT);
if (pfr_validate_addr(&ad))
return (EINVAL);
}
}
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
xdel++;
}
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
if (!(flags & PFR_FLAG_DUMMY)) {
return (0);
_bad:
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
return (rv);
}
int
-pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *size2, int *nadd, int *ndel, int *nchange, int flags,
u_int32_t ignore_pfrt_flags)
{
struct pfr_kentryworkq addq, delq, changeq;
struct pfr_kentry *p, *q;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xadd = 0, xdel = 0, xchange = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_FEEDBACK);
SLIST_INIT(&addq);
SLIST_INIT(&delq);
SLIST_INIT(&changeq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
}
_skip:
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
senderr(0);
}
i = 0;
+ addr = _addr + size;
SLIST_FOREACH(p, &delq, pfrke_workq) {
pfr_copyout_addr(&ad, p);
ad.pfra_fback = PFR_FB_DELETED;
- if (COPYOUT(&ad, addr+size+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
+ addr += sizeof (ad);
i++;
}
}
pfr_clean_node_mask(tmpkt, &addq);
pfr_destroy_kentries(&addq);
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
pfr_destroy_ktable(tmpkt, 0);
return (rv);
}
int
-pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
int *nmatch, int flags)
{
struct pfr_ktable *kt;
if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
return (ESRCH);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
return (EFAULT);
if (pfr_validate_addr(&ad))
return (EINVAL);
(p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
if (p != NULL && !p->pfrke_not)
xmatch++;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
return (EFAULT);
}
if (nmatch != NULL)
}
int
-pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
+pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
int flags)
{
struct pfr_ktable *kt;
}
int
-pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
+pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
int flags)
{
struct pfr_ktable *kt;
struct pfr_walktree w;
struct pfr_kentryworkq workq;
int rv;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
/* XXX PFR_FLAG_CLSTATS disabled */
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
}
int
-pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
int *nzero, int flags)
{
struct pfr_ktable *kt;
struct pfr_kentryworkq workq;
struct pfr_kentry *p;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xzero = 0;
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
return (ESRCH);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
if (flags & PFR_FLAG_FEEDBACK) {
ad.pfra_fback = (p != NULL) ?
PFR_FB_CLEARED : PFR_FB_NONE;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
if (p != NULL) {
return (0);
_bad:
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
return (rv);
}
-int
+static int
pfr_validate_addr(struct pfr_addr *ad)
{
int i;
return (0);
}
-void
+static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
int *naddr, int sweep)
{
*naddr = w.pfrw_cnt;
}
-void
+static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
struct pfr_walktree w;
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
union sockaddr_union sa, mask;
struct radix_node_head *head;
struct pfr_kentry *ke;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&sa, sizeof (sa));
if (ad->pfra_af == AF_INET) {
return (ke);
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
struct pfr_kentry *ke;
return (ke);
}
-void
+static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
struct pfr_kentry *p, *q;
}
}
-void
+static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
if (ke->pfrke_intrpool)
pool_put(&pfr_kentry_pl, ke);
}
-void
+static void
pfr_insert_kentries(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq, u_int64_t tzero)
{
return (0);
}
-void
+static void
pfr_remove_kentries(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq)
{
pfr_destroy_kentries(workq);
}
-void
+static void
pfr_clean_node_mask(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq)
{
pfr_unroute_kentry(kt, p);
}
-void
+static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
int negchange)
{
struct pfr_kentry *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrke_workq) {
if (negchange)
}
}
-void
-pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
+static void
+pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
struct pfr_addr ad;
int i;
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
break;
ad.pfra_fback = PFR_FB_NONE;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
break;
}
}
-void
+static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
int i;
}
}
-int
+static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
union sockaddr_union mask;
struct radix_node *rn;
struct radix_node_head *head;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
if (ke->pfrke_af == AF_INET)
return (rn == NULL ? -1 : 0);
}
-int
+static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
union sockaddr_union mask;
struct radix_node *rn;
struct radix_node_head *head;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (ke->pfrke_af == AF_INET)
head = kt->pfrkt_ip4;
return (0);
}
-void
+static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
bzero(ad, sizeof (*ad));
ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
-int
+static int
pfr_walktree(struct radix_node *rn, void *arg)
{
struct pfr_kentry *ke = (struct pfr_kentry *)rn;
struct pfr_walktree *w = arg;
int flags = w->pfrw_flags;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
switch (w->pfrw_op) {
case PFRW_MARK:
struct pfr_addr ad;
pfr_copyout_addr(&ad, ke);
- if (copyout(&ad,
- CAST_USER_ADDR_T(w->pfrw_addr),
- sizeof (ad)))
+ if (copyout(&ad, w->pfrw_addr, sizeof (ad)))
return (EFAULT);
- w->pfrw_addr++;
+ w->pfrw_addr += sizeof (ad);
}
break;
case PFRW_GET_ASTATS:
pfr_copyout_addr(&as.pfras_a, ke);
+#if !defined(__LP64__)
+ /* Initialized to avoid potential info leak to
+ * userspace */
+ as._pad = 0;
+#endif
bcopy(ke->pfrke_packets, as.pfras_packets,
sizeof (as.pfras_packets));
bcopy(ke->pfrke_bytes, as.pfras_bytes,
if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
return (EFAULT);
- w->pfrw_astats++;
+ w->pfrw_astats += sizeof (as);
}
break;
case PFRW_POOL_GET:
struct pfr_ktable *p;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_ALLRSETS);
}
int
-pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
+pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
struct pfr_ktableworkq addq, changeq;
struct pfr_ktable *p, *q, *r, key;
int i, rv, xadd = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&addq);
SLIST_INIT(&changeq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
senderr(EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
flags & PFR_FLAG_USERIOCTL))
senderr(EINVAL);
}
int
-pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
+pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
struct pfr_ktableworkq workq;
struct pfr_ktable *p, *q, key;
int i, xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
}
int
-pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
+pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
int flags)
{
struct pfr_ktable *p;
continue;
if (n-- <= 0)
continue;
- if (COPYOUT(&p->pfrkt_t, tbl++, sizeof (*tbl), flags))
+ if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags))
return (EFAULT);
+ tbl += sizeof (p->pfrkt_t);
}
if (n) {
printf("pfr_get_tables: corruption detected (%d).\n", n);
}
int
-pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
+pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
int flags)
{
struct pfr_ktable *p;
struct pfr_ktableworkq workq;
int n, nn;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
/* XXX PFR_FLAG_CLSTATS disabled */
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
continue;
if (n-- <= 0)
continue;
- if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof (*tbl), flags)) {
+ if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) {
return (EFAULT);
}
+ tbl += sizeof (p->pfrkt_ts);
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
}
if (flags & PFR_FLAG_CLSTATS)
}
int
-pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
+pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
struct pfr_ktableworkq workq;
struct pfr_ktable *p, key;
int i, xzero = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_ADDRSTOO);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0, 0))
return (EINVAL);
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
}
int
-pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
+pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
int *nchange, int *ndel, int flags)
{
struct pfr_ktableworkq workq;
struct pfr_ktable *p, *q, key;
int i, xchange = 0, xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
if ((setflag & ~PFR_TFLAG_USRMASK) ||
(setflag & clrflag))
return (EINVAL);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
struct pf_ruleset *rs;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
}
int
-pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
int *nadd, int *naddr, u_int32_t ticket, int flags)
{
struct pfr_ktableworkq tableq;
struct pf_ruleset *rs;
int i, rv, xadd = 0, xaddr = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
if (size && !(flags & PFR_FLAG_ADDRSTOO))
return (EBUSY);
tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
SLIST_INIT(&tableq);
- kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
+ kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
if (kt == NULL) {
kt = pfr_create_ktable(tbl, 0, 1);
if (kt == NULL)
return (ENOMEM);
}
SLIST_INIT(&addrq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
struct pf_ruleset *rs;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor);
struct pfr_ktableworkq workq;
struct pf_ruleset *rs;
int xadd = 0, xchange = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor);
return (0);
}
-void
+static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
struct pfr_ktable *shadow = kt->pfrkt_shadow;
int nflags;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (shadow->pfrkt_cnt == NO_ADDRESSES) {
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
pfr_setflags_ktable(kt, nflags);
}
-int
+/*
+ * Sanitize a struct pfr_table that was just copied in from userspace
+ * (callers invoke this immediately after COPYIN of key.pfrkt_t): force
+ * NUL-termination of the fixed-size pfrt_anchor and pfrt_name buffers
+ * so that later string handling cannot read past them if the caller
+ * supplied unterminated data.
+ */
+void
+pfr_table_copyin_cleanup(struct pfr_table *tbl)
+{
+	tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
+	tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
+}
+
+static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
int i;
* Rewrite anchors referenced by tables to remove slashes
* and check for validity.
*/
-int
+static int
pfr_fix_anchor(char *anchor)
{
size_t siz = MAXPATHLEN;
return (0);
}
-int
+static int
pfr_table_count(struct pfr_table *filter, int flags)
{
struct pf_ruleset *rs;
return (pf_main_ruleset.tables);
}
-int
+static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
if (flags & PFR_FLAG_ALLRSETS)
return (0);
}
-void
+static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
struct pfr_ktable *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_insert_ktable(p);
}
-void
+static void
pfr_insert_ktable(struct pfr_ktable *kt)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
pfr_ktable_cnt++;
kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
-void
+static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
struct pfr_ktable *p, *q;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
for (p = SLIST_FIRST(workq); p; p = q) {
q = SLIST_NEXT(p, pfrkt_workq);
}
}
-void
+static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(newf & PFR_TFLAG_REFERENCED) &&
!(newf & PFR_TFLAG_PERSIST))
kt->pfrkt_flags = newf;
}
-void
+static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
struct pfr_ktable *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_clstats_ktable(p, tzero, recurse);
}
-void
+static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (recurse) {
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
kt->pfrkt_tzero = tzero;
}
-struct pfr_ktable *
+static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
struct pfr_ktable *kt;
struct pf_ruleset *rs;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
if (kt == NULL)
return (kt);
}
-void
+static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
struct pfr_ktable *p, *q;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
for (p = SLIST_FIRST(workq); p; p = q) {
q = SLIST_NEXT(p, pfrkt_workq);
}
}
-void
+static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (flushaddr) {
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
pool_put(&pfr_ktable_pl, kt);
}
-int
+static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
int d;
return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}
-struct pfr_ktable *
+static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
/* struct pfr_ktable start like a struct pfr_table */
return (RB_FIND(pfr_ktablehead, &pfr_ktables,
- (struct pfr_ktable *)tbl));
+ (struct pfr_ktable *)(void *)tbl));
}
int
struct pfr_kentry *ke = NULL;
int match;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
{
struct pfr_kentry *ke = NULL;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
struct pfr_table tbl;
struct pf_anchor *ac = rs->anchor;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&tbl, sizeof (tbl));
strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
kt = pfr_lookup_table(&tbl);
if (kt == NULL) {
- kt = pfr_create_ktable(&tbl, pf_time_second(), 1);
+ kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
if (kt == NULL)
return (NULL);
if (ac != NULL) {
void
pfr_detach_table(struct pfr_ktable *kt)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
printf("pfr_detach_table: refcount = %d.\n",
union sockaddr_union mask;
int idx = -1, use_counter = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (af == AF_INET)
addr = (struct pf_addr *)&pfr_sin.sin_addr;
}
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
struct pfr_walktree w;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&w, sizeof (w));
w.pfrw_op = PFRW_POOL_GET;
{
struct pfr_walktree w;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&w, sizeof (w));
w.pfrw_op = PFRW_DYNADDR_UPDATE;