/*
- * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
struct radix_node_head *head;
struct pfr_kentry *ke;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&sa, sizeof (sa));
if (ad->pfra_af == AF_INET) {
{
struct pfr_kentry *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrke_workq) {
if (negchange)
struct radix_node *rn;
struct radix_node_head *head;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
if (ke->pfrke_af == AF_INET)
struct radix_node *rn;
struct radix_node_head *head;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (ke->pfrke_af == AF_INET)
head = kt->pfrkt_ip4;
struct pfr_walktree *w = arg;
int flags = w->pfrw_flags;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
switch (w->pfrw_op) {
case PFRW_MARK:
pfr_copyout_addr(&as.pfras_a, ke);
+#if !defined(__LP64__)
+ /* Initialized to avoid a potential info leak to userspace */
+ as._pad = 0;
+#endif
bcopy(ke->pfrke_packets, as.pfras_packets,
sizeof (as.pfras_packets));
bcopy(ke->pfrke_bytes, as.pfras_bytes,
struct pfr_ktable *p;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_ALLRSETS);
int i, rv, xadd = 0;
u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&addq);
for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
senderr(EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
flags & PFR_FLAG_USERIOCTL))
senderr(EINVAL);
struct pfr_ktable *p, *q, key;
int i, xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&workq);
for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
int n, nn;
u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
/* XXX PFR_FLAG_CLSTATS disabled */
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
int i, xzero = 0;
u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_ADDRSTOO);
for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0, 0))
return (EINVAL);
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
struct pfr_ktable *p, *q, key;
int i, xchange = 0, xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
if ((setflag & ~PFR_TFLAG_USRMASK) ||
for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
+ pfr_table_copyin_cleanup(&key.pfrkt_t);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
struct pf_ruleset *rs;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
struct pf_ruleset *rs;
int i, rv, xadd = 0, xaddr = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
if (size && !(flags & PFR_FLAG_ADDRSTOO))
return (EBUSY);
tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
SLIST_INIT(&tableq);
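+ /* struct pfr_ktable starts like a struct pfr_table */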
- kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
+ kt = RB_FIND(pfr_ktablehead, &pfr_ktables,
+     (struct pfr_ktable *)(void *)tbl);
if (kt == NULL) {
kt = pfr_create_ktable(tbl, 0, 1);
if (kt == NULL)
struct pf_ruleset *rs;
int xdel = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor);
int xadd = 0, xchange = 0;
u_int64_t tzero = pf_calendar_time_second();
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
rs = pf_find_ruleset(trs->pfrt_anchor);
struct pfr_ktable *shadow = kt->pfrkt_shadow;
int nflags;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (shadow->pfrkt_cnt == NO_ADDRESSES) {
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
pfr_setflags_ktable(kt, nflags);
}
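+/*
+ * NUL-terminate the table and anchor names copied in from userspace,
+ * so later string handling cannot read past the fixed-size buffers.
+ */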
+void
+pfr_table_copyin_cleanup(struct pfr_table *tbl)
+{
+ tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
+ tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
+}
+
static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
{
struct pfr_ktable *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_insert_ktable(p);
static void
pfr_insert_ktable(struct pfr_ktable *kt)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
pfr_ktable_cnt++;
{
struct pfr_ktable *p, *q;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
for (p = SLIST_FIRST(workq); p; p = q) {
q = SLIST_NEXT(p, pfrkt_workq);
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(newf & PFR_TFLAG_REFERENCED) &&
!(newf & PFR_TFLAG_PERSIST))
{
struct pfr_ktable *p;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
SLIST_FOREACH(p, workq, pfrkt_workq)
pfr_clstats_ktable(p, tzero, recurse);
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (recurse) {
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
struct pfr_ktable *kt;
struct pf_ruleset *rs;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
if (kt == NULL)
{
struct pfr_ktable *p, *q;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
for (p = SLIST_FIRST(workq); p; p = q) {
q = SLIST_NEXT(p, pfrkt_workq);
{
struct pfr_kentryworkq addrq;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (flushaddr) {
pfr_enqueue_addrs(kt, &addrq, NULL, 0);
static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
/* struct pfr_ktable starts like a struct pfr_table */
return (RB_FIND(pfr_ktablehead, &pfr_ktables,
- (struct pfr_ktable *)tbl));
+ (struct pfr_ktable *)(void *)tbl));
}
int
struct pfr_kentry *ke = NULL;
int match;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
{
struct pfr_kentry *ke = NULL;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
kt = kt->pfrkt_root;
struct pfr_table tbl;
struct pf_anchor *ac = rs->anchor;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&tbl, sizeof (tbl));
strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
void
pfr_detach_table(struct pfr_ktable *kt)
{
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
printf("pfr_detach_table: refcount = %d.\n",
union sockaddr_union mask;
int idx = -1, use_counter = 0;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
if (af == AF_INET)
addr = (struct pf_addr *)&pfr_sin.sin_addr;
{
struct pfr_walktree w;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&w, sizeof (w));
w.pfrw_op = PFRW_POOL_GET;
{
struct pfr_walktree w;
- lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
bzero(&w, sizeof (w));
w.pfrw_op = PFRW_DYNADDR_UPDATE;