options BOND # # <bond>
options PF # Packet Filter # <pf>
options PF_PKTHDR # PF tag inside mbuf pkthdr # <pf_pkthdr>
+options PKT_PRIORITY # Packet priority support # <pkt_priority>
options PFLOG # PF log interface # <pflog>
options IPDIVERT # Divert sockets (for NAT) # <ipdivert>
options IPFLOW # IP fast forwarding # <ipflow>
options TCP_DROP_SYNFIN # Drop TCP packets with SYN+FIN set # <tcpdrop_synfin>
options ICMP_BANDLIM # ICMP bandwidth limiting sysctl
options IFNET_INPUT_SANITY_CHK # allow dlil/ifnet input sanity check # <ifnet_input_chk>
+options IFNET_ROUTE_REFCNT # count route references to ifnet # <ifnet_route_refcnt>
options SYSV_SEM # SVID semaphores # <sysv_sem>
options SYSV_MSG # SVID messages # <sysv_msg>
options SYSV_SHM # SVID shared mem # <sysv_shm>
options ZLIB # inflate/deflate support # <zlib>
-options IF_BRIDGE # <if_bridge>
makeoptions LIBDRIVER = "libDriver_kern.o" # <libdriver>
makeoptions LIBOBJC = "libkobjc.o" # <kernobjc>
#
options CONFIG_CODE_DECRYPTION # <config_embedded>
+#
+# User Content Protection, used on embedded
+#
+
+options CONFIG_PROTECT # <config_protect>
+
#
# Ethernet (ARP)
OPTIONS/ipfirewall optional ipfirewall
OPTIONS/ipv6firewall optional ipv6firewall
OPTIONS/tcpdebug optional tcpdebug
-OPTIONS/if_bridge optional if_bridge
OPTIONS/faith optional faith
OPTIONS/gif optional gif
OPTIONS/netat optional netat
bsd/net/bpf.c optional bpfilter
bsd/net/bpf_filter.c optional bpfilter
-bsd/net/if_bridge.c optional if_bridge
-bsd/net/bridgestp.c optional if_bridge
bsd/net/bsd_comp.c optional ppp_bsdcomp
bsd/net/if.c optional networking
bsd/net/if_atmsubr.c optional atm
bsd/hfs/hfs_vnops.c optional hfs
bsd/hfs/hfs_xattr.c optional hfs
bsd/hfs/MacOSStubs.c optional hfs
+bsd/hfs/cprotect.c optional hfs
bsd/hfs/rangelist.c optional hfs
bsd/hfs/hfscommon/BTree/BTree.c optional hfs
bsd/hfs/hfscommon/BTree/BTreeAllocate.c optional hfs
munged_rv1 = ((u_int)rv[1]);
break;
case _SYSCALL_RET_OFF_T:
+ case _SYSCALL_RET_UINT64_T:
munged_rv0 = *(u_int64_t *)rv;
munged_rv1 = 0LL;
break;
munged_rv1 = ((u_int)rv[1]);
break;
case _SYSCALL_RET_OFF_T:
+ case _SYSCALL_RET_UINT64_T:
munged_rv0 = *(u_int64_t *)rv;
munged_rv1 = 0LL;
break;
case _SYSCALL_RET_ADDR_T:
case _SYSCALL_RET_SIZE_T:
case _SYSCALL_RET_SSIZE_T:
+ case _SYSCALL_RET_UINT64_T:
regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
regs->rdx = 0;
break;
case _SYSCALL_RET_ADDR_T:
case _SYSCALL_RET_SIZE_T:
case _SYSCALL_RET_SSIZE_T:
+ case _SYSCALL_RET_UINT64_T:
regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
regs->rdx = 0;
break;
regs->save_r4 = ((u_int)uthread->uu_rval[1]);
break;
case _SYSCALL_RET_OFF_T:
- /* off_t returns 64 bits split across two registers for 32 bit */
+ case _SYSCALL_RET_UINT64_T:
+ /* return 64 bits split across two registers for 32 bit */
/* process and in one register for 64 bit process */
if (IS_64BIT_PROCESS(proc)) {
u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
regs->save_r4 = ((u_int)uthread->uu_rval[1]);
break;
case _SYSCALL_RET_OFF_T:
- /* off_t returns 64 bits split across two registers for 32 bit */
+ case _SYSCALL_RET_UINT64_T:
+ /* return 64 bits split across two registers for 32 bit */
/* process and in one register for 64 bit process */
if (IS_64BIT_PROCESS(proc)) {
u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
0 /* type */
};
+
+/*
+ WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
+
+ ANY CODE PROTECTED UNDER "#ifdef __arm__" IS SERIOUSLY SUPPOSED TO BE THERE!
+ IF YOU REMOVE ARM CODE, RANDOM WILL NOT MEAN ANYTHING FOR iPHONES ALL OVER.
+ PLEASE DON'T TOUCH __arm__ CODE IN THIS FILE!
+
+ WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
+*/
+
+
/* Used to detect whether we've already been initialized */
static int gRandomInstalled = 0;
static PrngRef gPrngRef;
/* lock down the mutex */
lck_mtx_lock(gYarrowMutex);
+
int bytes_remaining = uio_resid(uio);
while (bytes_remaining > 0 && retCode == 0) {
/* get the user's data */
}
lck_mtx_lock(gYarrowMutex);
-
int bytes_read = 0;
int bytes_remaining = numbytes;
read_random(&buf, sizeof (buf));
return (buf);
}
+
--- /dev/null
+/*
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+#include <sys/mount.h>
+#include <sys/vnode_if.h>
+#include <sys/vnode_internal.h>
+
+#include <sys/cprotect.h>
+#include <sys/random.h>
+#include <sys/xattr.h>
+#include <sys/uio_internal.h>
+
+#include "hfs.h"
+#include "hfs_cnode.h"
+
+/*
+ * Content-protection stubs (see also <sys/cprotect.h>, included above).
+ *
+ * cp_key_store_action: would apply a key-store state change; this build
+ * provides no content-protection support, so it always returns ENOTSUP.
+ */
+int cp_key_store_action(int action __unused)
+{
+	return ENOTSUP;
+}
+
+
+/*
+ * cp_register_wraps: would register key wrap/unwrap callbacks from a
+ * key-store provider; unsupported in this build, always ENOTSUP.
+ */
+int cp_register_wraps(cp_wrap_func_t key_store_func __unused)
+{
+	return ENOTSUP;
+}
+
} else {
buf_brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */
}
+
+ /* Don't let anyone else try to use this bp, it's been consumed */
+ blockPtr->blockHeader = NULL;
+
} else {
if (options & kForceWriteBlock) {
if (hfsmp->jnl) {
} else {
retval = VNOP_BWRITE(bp);
}
+
+ /* Don't let anyone else try to use this bp, it's been consumed */
+ blockPtr->blockHeader = NULL;
+
} else if (options & kMarkBlockDirty) {
struct timeval tv;
microuptime(&tv);
buf_clearflags(bp, B_LOCKED);
buf_bawrite(bp);
}
+
+ /* Don't let anyone else try to use this bp, it's been consumed */
+ blockPtr->blockHeader = NULL;
+
} else {
// check if we had previously called journal_modify_block_start()
// on this block and if so, abort it (which will call buf_brelse()).
} else {
buf_brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */
}
- };
- };
+
+ /* Don't let anyone else try to use this bp, it's been consumed */
+ blockPtr->blockHeader = NULL;
+ }
+ }
exit:
return (retval);
u_int32_t phys_sectorsize;
daddr64_t prev_alt_sector;
daddr_t bitmapblks;
- int lockflags;
+ int lockflags = 0;
int error;
int64_t oldBitmapSize;
Boolean usedExtendFileC = false;
+ int transaction_begun = 0;
devvp = hfsmp->hfs_devvp;
vcb = HFSTOVCB(hfsmp);
addblks = newblkcnt - vcb->totalBlocks;
printf("hfs_extendfs: growing %s by %d blocks\n", vcb->vcbVN, addblks);
+
+ HFS_MOUNT_LOCK(hfsmp, TRUE);
+ if (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
+ HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ error = EALREADY;
+ goto out;
+ }
+ hfsmp->hfs_flags |= HFS_RESIZE_IN_PROGRESS;
+ HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+
+ /* Invalidate the current free extent cache */
+ invalidate_free_extent_cache(hfsmp);
+
/*
* Enclose changes inside a transaction.
*/
if (hfs_start_transaction(hfsmp) != 0) {
- return (EINVAL);
+ error = EINVAL;
+ goto out;
}
+ transaction_begun = 1;
/*
* Note: we take the attributes lock in case we have an attribute data vnode
we should reset the allocLimit field. If it changed, it will
get updated; if not, it will remain the same.
*/
+ HFS_MOUNT_LOCK(hfsmp, TRUE);
+ hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS;
hfsmp->allocLimit = vcb->totalBlocks;
- hfs_systemfile_unlock(hfsmp, lockflags);
- hfs_end_transaction(hfsmp);
+ HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ if (lockflags) {
+ hfs_systemfile_unlock(hfsmp, lockflags);
+ }
+ if (transaction_begun) {
+ hfs_end_transaction(hfsmp);
+ }
return (error);
}
goto out;
}
+ /* Invalidate the current free extent cache */
+ invalidate_free_extent_cache(hfsmp);
+
/* Start with a clean journal. */
hfs_journal_flush(hfsmp);
hfs_journal_flush(struct hfsmount *hfsmp)
{
int ret;
-
+
+ /* Only peek at hfsmp->jnl while holding the global lock */
+ lck_rw_lock_shared(&hfsmp->hfs_global_lock);
if (hfsmp->jnl) {
- lck_rw_lock_shared(&hfsmp->hfs_global_lock);
ret = journal_flush(hfsmp->jnl);
- lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
} else {
ret = 0;
}
-
+ lck_rw_unlock_shared(&hfsmp->hfs_global_lock);
+
return ret;
}
}
out:
- if (hfsmp->jnl && user_start) {
+ if (user_start) {
vsunlock(user_start, user_len, TRUE);
}
/* If we didn't do anything then go ahead and dump the hint. */
u_int16_t index;
Boolean validHint;
-
////////////////////////// Priliminary Checks ///////////////////////////////
nodeRec.buffer = nil; // so we can call ReleaseNode
////////////////////////////// Error Exit ///////////////////////////////////
ErrorExit:
-
+
(void) ReleaseNode (btreePtr, &nodeRec);
-
+
iterator->hint.writeCount = 0;
iterator->hint.nodeNum = 0;
iterator->hint.index = 0;
BlockDeallocate
Deallocate a contiguous run of allocation blocks.
+ invalidate_free_extent_cache Invalidate free extent cache for a given volume.
Internal routines:
BlockMarkFree
u_int32_t *actualStartBlock,
u_int32_t *actualNumBlocks);
+static int free_extent_cache_active(
+ ExtendedVCB *vcb);
/*
;________________________________________________________________________________
HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
}
+ if (free_extent_cache_active(vcb) == 0) {
+ goto skip_cache;
+ }
+
tempWord = vcb->vcbFreeExtCnt;
// Add this free chunk to the free extent list
if (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
}
}
+skip_cache:
MarkVCBDirty(vcb);
HFS_MOUNT_UNLOCK(vcb, TRUE);
u_int32_t foundBlocks;
u_int32_t newStartBlock, newBlockCount;
- if (vcb->vcbFreeExtCnt == 0 || vcb->vcbFreeExt[0].blockCount == 0)
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (free_extent_cache_active(vcb) == 0 ||
+ vcb->vcbFreeExtCnt == 0 ||
+ vcb->vcbFreeExt[0].blockCount == 0) {
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
return dskFulErr;
+ }
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
// Just grab up to maxBlocks of the first (largest) free exent.
*actualStartBlock = vcb->vcbFreeExt[0].startBlock;
if (foundBlocks >= minBlocks)
break; // Found what we needed!
+ HFS_MOUNT_LOCK(vcb, TRUE);
+ if (free_extent_cache_active(vcb) == 0) {
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+ goto skip_cache;
+ }
+ HFS_MOUNT_UNLOCK(vcb, TRUE);
+
// This free chunk wasn't big enough. Try inserting it into the free extent cache in case
// the allocation wasn't forced contiguous.
really_add = 0;
updated_free_extents = 1;
}
}
-
+skip_cache:
sanity_check_free_ext(vcb, 0);
} while (currentBlock < stopBlock);
return (inuse);
}
+/* Invalidate free extent cache for a given volume.
+ * This cache is invalidated and disabled when a volume is being resized
+ * (via hfs_truncatefs() or hfs_extendfs()).
+ *
+ * Takes the mount lock internally, so the caller must NOT already hold it.
+ *
+ * Returns: Nothing
+ */
+void invalidate_free_extent_cache(ExtendedVCB *vcb)
+{
+	u_int32_t i;
+	/* Zero out every cached extent and reset the count, under the mount lock */
+	HFS_MOUNT_LOCK(vcb, TRUE);
+	for (i = 0; i < vcb->vcbFreeExtCnt; i++) {
+		vcb->vcbFreeExt[i].startBlock = 0;
+		vcb->vcbFreeExt[i].blockCount = 0;
+	}
+	vcb->vcbFreeExtCnt = 0;
+	HFS_MOUNT_UNLOCK(vcb, TRUE);
+
+	return;
+}
+
+/* Check whether free extent cache is active or not.
+ * This cache is invalidated and disabled when a volume is being resized
+ * (via hfs_truncatefs() or hfs_extendfs()).
+ *
+ * This function assumes that the caller is holding the lock on
+ * the mount point.
+ *
+ * Returns: 0 if the cache is not active,
+ * 1 if the cache is active.
+ */
+static int free_extent_cache_active(ExtendedVCB *vcb)
+{
+	int retval = 1;
+
+	/* A resize in progress disables the cache (flag set by the resize paths) */
+	if (vcb->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
+		retval = 0;
+	}
+	return retval;
+}
u_int32_t firstBlock,
u_int32_t numBlocks);
+EXTERN_API_C ( void )
+invalidate_free_extent_cache (ExtendedVCB * vcb);
+
EXTERN_API_C( OSErr )
BlockMarkAllocated(ExtendedVCB *vcb, u_int32_t startingBlock, u_int32_t numBlocks);
bsd_init_kprintf("calling knote_init\n");
knote_init();
-#if CONFIG_EMBEDDED
- /* Initialize kernel memory status notifications */
- bsd_init_kprintf("calling kern_memorystatus_init\n");
- kern_memorystatus_init();
-#endif
-
/* Initialize for async IO */
bsd_init_kprintf("calling aio_init\n");
aio_init();
kernproc->p_fd->fd_cdir = NULL;
kernproc->p_fd->fd_rdir = NULL;
+#if CONFIG_EMBEDDED
+ /* Initialize kernel memory status notifications */
+ bsd_init_kprintf("calling kern_memorystatus_init\n");
+ kern_memorystatus_init();
+#endif
+
#ifdef GPROF
/* Initialize kernel profiling. */
kmstartup();
return funcPtr[0];
}
+extern void IOServicePublishResource( const char * property, boolean_t value );
+extern boolean_t IOServiceWaitForMatchingResource( const char * property, uint64_t timeout );
+extern boolean_t IOCatalogueMatchingDriversPresent( const char * property );
+
static void *
_decmp_get_func(uint32_t type, int offset)
{
return _func_from_offset(type, offset);
}
+ // does IOKit know about a kext that is supposed to provide this type?
+ char providesName[80];
+ snprintf(providesName, sizeof(providesName), "com.apple.AppleFSCompression.providesType%u", type);
+ if (IOCatalogueMatchingDriversPresent(providesName)) {
+ // there is a kext that says it will register for this type, so let's wait for it
+ char resourceName[80];
+ snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", type);
+ printf("waiting for %s\n", resourceName);
+ while(decompressors[type] == NULL) {
+ lck_rw_done(decompressorsLock); // we have to unlock to allow the kext to register
+ if (IOServiceWaitForMatchingResource(resourceName, 60)) {
+ break;
+ }
+ if (!IOCatalogueMatchingDriversPresent(providesName)) {
+ //
+ printf("the kext with %s is no longer present\n", providesName);
+ break;
+ }
+ printf("still waiting for %s\n", resourceName);
+ lck_rw_lock_shared(decompressorsLock);
+ }
+ // IOKit says the kext is loaded, so it should be registered too!
+ if (decompressors[type] == NULL) {
+ ErrorLog("we found %s, but the type still isn't registered\n", providesName);
+ return NULL;
+ }
+ // it's now registered, so let's return the function
+ return _func_from_offset(type, offset);
+ }
+
// the compressor hasn't registered, so it never will unless someone manually kextloads it
ErrorLog("tried to access a compressed file of unregistered type %d\n", type);
return NULL;
}
-#define decmp_get_func(type, func) _decmp_get_func(type, offsetof_func(func))
+#define decmp_get_func(type, func) ((typeof(((decmpfs_registration*)NULL)->func))_decmp_get_func(type, offsetof_func(func)))
#pragma mark --- utilities ---
errno_t ret = 0;
int locked = 0;
+ char resourceName[80];
if ((compression_type >= CMP_MAX) ||
(!registration) ||
goto out;
}
decompressors[compression_type] = registration;
+ snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type);
+ IOServicePublishResource(resourceName, TRUE);
wakeup((caddr_t)&decompressors);
out:
errno_t ret = 0;
int locked = 0;
-
+ char resourceName[80];
+
if ((compression_type >= CMP_MAX) ||
(!registration) ||
(registration->decmpfs_registration != DECMPFS_REGISTRATION_VERSION)) {
goto out;
}
decompressors[compression_type] = NULL;
+ snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type);
+ IOServicePublishResource(resourceName, FALSE);
wakeup((caddr_t)&decompressors);
out:
#include <sys/ubc_internal.h>
+#include <kern/ipc_misc.h>
+#include <vm/vm_protos.h>
+
+#include <mach/mach_port.h>
+
+kern_return_t ipc_object_copyin(ipc_space_t, mach_port_name_t,
+ mach_msg_type_name_t, ipc_port_t *);
+void ipc_port_release_send(ipc_port_t);
+
struct psemnode;
struct pshmnode;
void fg_drop(struct fileproc * fp);
void fg_free(struct fileglob *fg);
void fg_ref(struct fileproc * fp);
+#if CONFIG_EMBEDDED
+void fileport_releasefg(struct fileglob *fg);
+#endif /* CONFIG_EMBEDDED */
/* flags for close_internal_locked */
#define FD_DUP2RESV 1
break;
}
+ case F_GETPROTECTIONCLASS: {
+ // stub to make the API work
+ printf("Reached F_GETPROTECTIONCLASS, returning without action\n");
+ error = 0;
+ goto out;
+ }
+
+ case F_SETPROTECTIONCLASS: {
+ // stub to make the API work
+ printf("Reached F_SETPROTECTIONCLASS, returning without action\n");
+ error = 0;
+ goto out;
+ }
+
+
default:
/*
	 * This is an fcntl() that we do not recognize at this level;
}
+#if CONFIG_EMBEDDED
+/*
+ * fileport_makeport
+ *
+ * Description: Obtain a Mach send right for a given file descriptor.
+ *
+ * Parameters: p Process calling fileport
+ * uap->fd The fd to reference
+ * uap->portnamep User address at which to place port name.
+ *
+ * Returns: 0 Success.
+ * EBADF Bad file descriptor.
+ * EINVAL File descriptor had type that cannot be sent, misc. other errors.
+ * EFAULT Address at which to store port name is not valid.
+ * EAGAIN Resource shortage.
+ *
+ * Implicit returns:
+ * On success, name of send right is stored at user-specified address.
+ */
+int
+fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
+ __unused int *retval)
+{
+	int err;
+	int fd = uap->fd;
+	user_addr_t user_portaddr = uap->portnamep;
+	struct fileproc *fp = FILEPROC_NULL;
+	struct fileglob *fg = NULL;
+	ipc_port_t fileport;
+	mach_port_name_t name = MACH_PORT_NULL;
+
+	err = fp_lookup(p, fd, &fp, 0);
+	if (err != 0) {
+		goto out;
+	}
+
+	/* Only certain file types may travel in a Mach message */
+	if (!filetype_issendable(fp->f_type)) {
+		err = EINVAL;
+		goto out;
+	}
+
+	/* Dropped when port is deallocated */
+	fg = fp->f_fglob;
+	fg_ref(fp);
+
+	/* Allocate and initialize a port */
+	fileport = fileport_alloc(fg);
+	if (fileport == IPC_PORT_NULL) {
+		err = EAGAIN;
+		/* undo the fg_ref() above; the port never took ownership */
+		fg_drop(fp);
+		goto out;
+	}
+
+	/* Add an entry. Deallocates port on failure. */
+	name = ipc_port_copyout_send(fileport, get_task_ipcspace(p->task));
+	if (!MACH_PORT_VALID(name)) {
+		err = EINVAL;
+		goto out;
+	}
+
+	err = copyout(&name, user_portaddr, sizeof(mach_port_name_t));
+	if (err != 0) {
+		goto out;
+	}
+
+	/* Tag the fileglob for debugging purposes */
+	lck_mtx_lock_spin(&fg->fg_lock);
+	fg->fg_lflags |= FG_PORTMADE;
+	lck_mtx_unlock(&fg->fg_lock);
+
+	fp_drop(p, fd, fp, 0);
+
+	return 0;
+
+out:
+	/* On error after copyout of a valid name, tear down the send right;
+	 * presumably destroying the port also releases the fg reference it
+	 * owns (via the fileport teardown path) -- confirm with IPC layer. */
+	if (MACH_PORT_VALID(name)) {
+		/* Don't care if another thread races us to deallocate the entry */
+		(void) mach_port_deallocate(get_task_ipcspace(p->task), name);
+	}
+
+	if (fp != FILEPROC_NULL) {
+		fp_drop(p, fd, fp, 0);
+	}
+
+	return err;
+}
+
+/*
+ * fileport_releasefg
+ *
+ * Drops a fileglob reference via closef_locked() on behalf of a fileport
+ * (counterpart to the fg_ref() taken in fileport_makeport()).
+ * NOTE(review): presumably invoked when the port is destroyed -- confirm
+ * against the IPC fileport teardown path.
+ */
+void
+fileport_releasefg(struct fileglob *fg)
+{
+	(void)closef_locked(NULL, fg, PROC_NULL);
+
+	return;
+}
+
+
+/*
+ * fileport_makefd
+ *
+ * Description: Obtain the file descriptor for a given Mach send right.
+ *
+ * Parameters: p Process calling fileport
+ * uap->port Name of send right to file port.
+ *
+ * Returns: 0 Success
+ * EINVAL Invalid Mach port name, or port is not for a file.
+ * fdalloc:EMFILE
+ * fdalloc:ENOMEM Unable to allocate fileproc or extend file table.
+ *
+ * Implicit returns:
+ * *retval (modified) The new descriptor
+ */
+int
+fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval)
+{
+	struct fileglob *fg;
+	struct fileproc *fp = FILEPROC_NULL;
+	ipc_port_t port = IPC_PORT_NULL;
+	mach_port_name_t send = uap->port;
+	kern_return_t res;
+	int fd;
+	int err;
+
+	/* Convert the user's port name into a send-right reference we hold */
+	res = ipc_object_copyin(get_task_ipcspace(p->task),
+		send, MACH_MSG_TYPE_COPY_SEND, &port);
+
+	if (res != KERN_SUCCESS) {
+		err = EINVAL;
+		goto out;
+	}
+
+	/* Reject ports that are not fileports */
+	fg = fileport_port_to_fileglob(port);
+	if (fg == NULL) {
+		err = EINVAL;
+		goto out;
+	}
+
+	MALLOC_ZONE(fp, struct fileproc *, sizeof(*fp), M_FILEPROC, M_WAITOK);
+	if (fp == FILEPROC_NULL) {
+		err = ENOMEM;
+		goto out;
+	}
+
+	bzero(fp, sizeof(*fp));
+
+	fp->f_fglob = fg;
+	fg_ref(fp);
+
+	/* NOTE(review): if fdalloc() fails below, only fp itself is freed on
+	 * the error path; the fileglob reference taken by fg_ref() above
+	 * appears to be leaked -- verify. */
+	proc_fdlock(p);
+	err = fdalloc(p, 0, &fd);
+	if (err != 0) {
+		proc_fdunlock(p);
+		goto out;
+	}
+
+	procfdtbl_releasefd(p, fd, fp);
+	proc_fdunlock(p);
+
+	*retval = fd;
+	err = 0;
+out:
+	if ((fp != NULL) && (0 != err)) {
+		FREE_ZONE(fp, sizeof(*fp), M_FILEPROC);
+	}
+
+	/* Always release the send right obtained by ipc_object_copyin() */
+	if (IPC_PORT_NULL != port) {
+		ipc_port_release_send(port);
+	}
+
+	return err;
+}
+#endif /* CONFIG_EMBEDDED */
+
+
/*
* dupfdopen
*
* a setuid exec to be able to access/control the
* task/thread after.
*/
- if (current_task() == p->task) {
- ipc_task_reset(p->task);
- ipc_thread_reset(current_thread());
- }
+ ipc_task_reset(p->task);
+ ipc_thread_reset((imgp->ip_new_thread != NULL) ?
+ imgp->ip_new_thread : current_thread());
/*
* If 'leave_sugid_clear' is non-zero, then we passed the
proc_list_lock();
KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
pid, exitval, 0, 0, 0);
- p->p_stat = SZOMB;
/* check for sysctl zomb lookup */
while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
}
/* safe to use p as this is a system reap */
+ p->p_stat = SZOMB;
p->p_listflag |= P_LIST_WAITING;
+
/*
* This is a named reference and it is not granted
* if the reap is already in progress. So we get
proc_list_unlock();
} else {
proc_list_lock();
- p->p_stat = SZOMB;
/* check for lookups by zomb sysctl */
while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0);
}
+ p->p_stat = SZOMB;
p->p_listflag |= P_LIST_WAITING;
+
/*
* This is a named reference and it is not granted
* if the reap is already in progress. So we get
int kern_memorystatus_last_level = 0;
unsigned int kern_memorystatus_kev_failure_count = 0;
int kern_memorystatus_level_critical = 5;
+#define kern_memorystatus_level_highwater (kern_memorystatus_level_critical + 5)
static struct {
jetsam_kernel_stats_t stats;
}
/*
 * Find `pid` in the current jetsam snapshot list and OR the given flag
 * into its entry; silently a no-op if the pid is not in the snapshot.
 */
static void
-jetsam_mark_pid_in_snapshot(pid_t pid)
+jetsam_mark_pid_in_snapshot(pid_t pid, int flag)
{
	int i = 0;
	for (i = 0; i < jetsam_snapshot_list_count; i++) {
		if (jetsam_snapshot_list[i].pid == pid) {
-			jetsam_snapshot_list[i].flags |= kJetsamFlagsKilled;
+			jetsam_snapshot_list[i].flags |= flag;
			return;
		}
	}
}
-static int
+int
jetsam_kill_top_proc(void)
{
proc_t p;
continue; // with lock held
}
lck_mtx_unlock(jetsam_list_mlock);
- jetsam_mark_pid_in_snapshot(aPid);
+ jetsam_mark_pid_in_snapshot(aPid, kJetsamFlagsKilled);
p = proc_find(aPid);
if (p != NULL) {
-#if DEBUG
- printf("jetsam: killing pid %d [%s] - memory_status_level: %d - ", aPid, p->p_comm, kern_memorystatus_level);
-#endif /* DEBUG */
+ printf("jetsam: killing pid %d [%s] - memory_status_level: %d - ",
+ aPid, (p->p_comm ? p->p_comm : "(unknown)"), kern_memorystatus_level);
exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
proc_rele(p);
#if DEBUG
return -1;
}
+/*
+ * Walk the jetsam priority list and kill the first process whose
+ * resident page count exceeds its per-process high-water mark
+ * (hiwat_pages). The killed pid is marked in the snapshot with
+ * kJetsamFlagsKilledHiwat and its list slot is cleared.
+ *
+ * Returns: 0 if a process was killed, -1 if no candidate was found.
+ */
+static int
+jetsam_kill_hiwat_proc(void)
+{
+	proc_t p;
+	int i;
+	if (jetsam_snapshot_list_count == 0) {
+		jetsam_snapshot_procs();
+	}
+	lck_mtx_lock(jetsam_list_mlock);
+	for (i = jetsam_priority_list_index; i < jetsam_priority_list_count; i++) {
+		pid_t aPid;
+		int32_t hiwat;
+		aPid = jetsam_priority_list[i].pid;
+		hiwat = jetsam_priority_list[i].hiwat_pages;
+		/* skip empty or non-hiwat slots in the list */
+		if (aPid == 0 || (hiwat < 0)) {
+			continue; // with lock held
+		}
+		/* NOTE(review): the list lock is dropped here and re-taken at the
+		 * bottom of the loop; slot i may change while unlocked -- verify
+		 * this race is acceptable, as in jetsam_kill_top_proc(). */
+		lck_mtx_unlock(jetsam_list_mlock);
+		p = proc_find(aPid);
+		if (p != NULL) {
+			int32_t pages = (int32_t)jetsam_task_page_count(p->task);
+			if (pages > hiwat) {
+#if DEBUG
+				/* NOTE(review): p->p_comm used unguarded here, unlike the
+				 * null-checked form in jetsam_kill_top_proc() -- confirm */
+				printf("jetsam: killing pid %d [%s] - %d pages > hiwat (%d)\n", aPid, p->p_comm, pages, hiwat);
+#endif /* DEBUG */
+				exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
+				proc_rele(p);
+#if DEBUG
+				printf("jetsam: pid %d killed - memory_status_level: %d\n", aPid, kern_memorystatus_level);
+#endif /* DEBUG */
+				jetsam_mark_pid_in_snapshot(aPid, kJetsamFlagsKilledHiwat);
+				jetsam_priority_list[i].pid = 0;
+				return 0;
+			} else {
+				proc_rele(p);
+			}
+
+		}
+		lck_mtx_lock(jetsam_list_mlock);
+	}
+	lck_mtx_unlock(jetsam_list_mlock);
+	return -1;
+}
+
static void
kern_memorystatus_thread(void)
{
}
}
+ while (kern_memorystatus_level <= kern_memorystatus_level_highwater) {
+ if (jetsam_kill_hiwat_proc() < 0) {
+ break;
+ }
+ }
+
kern_memorystatus_last_level = kern_memorystatus_level;
ev_msg.vendor_code = KEV_VENDOR_APPLE;
int sse4_2_flag = -1;
int x86_64_flag = -1;
int supplementalsse3_flag = -1;
+int aes_flag = -1;
SYSCTL_INT(_hw_optional, OID_AUTO, mmx, CTLFLAG_RD | CTLFLAG_KERN, &mmx_flag, 0, "");
SYSCTL_INT(_hw_optional, OID_AUTO, sse, CTLFLAG_RD | CTLFLAG_KERN, &sse_flag, 0, "");
/* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */
#undef x86_64
SYSCTL_INT(_hw_optional, OID_AUTO, x86_64, CTLFLAG_RD | CTLFLAG_KERN, &x86_64_flag, 0, "");
+SYSCTL_INT(_hw_optional, OID_AUTO, aes, CTLFLAG_RD | CTLFLAG_KERN, &aes_flag, 0, "");
#endif /* __ppc__ */
/*
sse4_1_flag = ((_get_cpu_capabilities() & kHasSSE4_1) == kHasSSE4_1)? 1 : 0;
sse4_2_flag = ((_get_cpu_capabilities() & kHasSSE4_2) == kHasSSE4_2)? 1 : 0;
x86_64_flag = ((_get_cpu_capabilities() & k64Bit) == k64Bit)? 1 : 0;
+ aes_flag = ((_get_cpu_capabilities() & kHasAES) == kHasAES)? 1 : 0;
/* hw.cpufamily */
cpufamily = cpuid_cpufamily();
#include <kern/task.h>
#include <kern/clock.h> /* for absolutetime_to_microtime() */
-#include <netinet/in.h> /* for TRAFFIC_MGT_SO_BACKGROUND */
+#include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */
#include <sys/socketvar.h> /* for struct socket */
#include <vm/vm_map.h>
int donice(struct proc *curp, struct proc *chgp, int n);
int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
-static void do_background_socket(struct proc *curp, thread_t thread, int priority);
+int uthread_get_background_state(uthread_t);
+static void do_background_socket(struct proc *p, thread_t thread, int priority);
static int do_background_thread(struct proc *curp, int priority);
static int do_background_task(struct proc *curp, int priority);
ut = get_bsdthread_info(thread);
low = 0;
- if ( (ut->uu_flag & UT_BACKGROUND) != 0 ) {
+ if ( (ut->uu_flag & UT_BACKGROUND_TRAFFIC_MGT) != 0 ) {
low = 1;
}
break;
error = do_background_task(p, uap->prio);
(void) do_background_socket(p, NULL, uap->prio);
- proc_lock(p);
- p->p_iopol_disk = (uap->prio == PRIO_DARWIN_BG ?
- IOPOL_THROTTLE : IOPOL_DEFAULT);
- proc_unlock(p);
-
found++;
if (refheld != 0)
proc_rele(p);
int error = 0;
task_category_policy_data_t info;
+ /* set the max scheduling priority on the task */
if (priority & PRIO_DARWIN_BG) {
info.role = TASK_THROTTLE_APPLICATION;
} else {
TASK_CATEGORY_POLICY,
(task_policy_t) &info,
TASK_CATEGORY_POLICY_COUNT);
+
+ if (error)
+ goto out;
+
+ proc_lock(p);
+
+ /* mark proc structure as backgrounded */
+ if (priority & PRIO_DARWIN_BG) {
+ p->p_lflag |= P_LBACKGROUND;
+ } else {
+ p->p_lflag &= ~P_LBACKGROUND;
+ }
+
+ /* set or reset the disk I/O priority */
+ p->p_iopol_disk = (priority == PRIO_DARWIN_BG ?
+ IOPOL_THROTTLE : IOPOL_DEFAULT);
+
+ proc_unlock(p);
+
+out:
return (error);
}
static void
-do_background_socket(struct proc *curp, thread_t thread, int priority)
+do_background_socket(struct proc *p, thread_t thread, int priority)
{
struct filedesc *fdp;
struct fileproc *fp;
int i;
if (priority & PRIO_DARWIN_BG) {
- /* enable network throttle process-wide (if no thread is specified) */
+ /*
+ * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
+ * the sockets with the background flag. There's nothing
+ * to do here for the PRIO_DARWIN_THREAD case.
+ */
if (thread == NULL) {
- proc_fdlock(curp);
- fdp = curp->p_fd;
+ proc_fdlock(p);
+ fdp = p->p_fd;
for (i = 0; i < fdp->fd_nfiles; i++) {
struct socket *sockp;
continue;
}
sockp = (struct socket *)fp->f_fglob->fg_data;
- sockp->so_traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
+ socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
sockp->so_background_thread = NULL;
}
- proc_fdunlock(curp);
+ proc_fdunlock(p);
}
} else {
+ u_int32_t traffic_mgt;
+ /*
+ * See comments on do_background_thread(). Deregulate network
+ * traffics only for setpriority(PRIO_DARWIN_THREAD).
+ */
+ traffic_mgt = (thread == NULL) ? 0 : TRAFFIC_MGT_SO_BG_REGULATE;
+
/* disable networking IO throttle.
* NOTE - It is a known limitation of the current design that we
* could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
* sockets created by other threads within this process.
*/
- proc_fdlock(curp);
- fdp = curp->p_fd;
+ proc_fdlock(p);
+ fdp = p->p_fd;
for ( i = 0; i < fdp->fd_nfiles; i++ ) {
struct socket *sockp;
if ((thread) && (sockp->so_background_thread != thread)) {
continue;
}
- sockp->so_traffic_mgt_flags &= ~TRAFFIC_MGT_SO_BACKGROUND;
+ socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND | traffic_mgt);
sockp->so_background_thread = NULL;
}
- proc_fdunlock(curp);
+ proc_fdunlock(p);
}
}
* do_background_thread
* Returns: 0 Success
* XXX - todo - does this need a MACF hook?
+ *
+ * NOTE: To maintain binary compatibility with PRIO_DARWIN_THREAD with respect
+ * to network traffic management, UT_BACKGROUND_TRAFFIC_MGT is set/cleared
+ * along with UT_BACKGROUND flag, as the latter alone no longer implies
+ * any form of traffic regulation (it simply means that the thread is
+ * background.) With PRIO_DARWIN_PROCESS, any form of network traffic
+ * management must be explicitly requested via whatever means appropriate,
+ * and only TRAFFIC_MGT_SO_BACKGROUND is set via do_background_socket().
*/
static int
do_background_thread(struct proc *curp __unused, int priority)
return(0);
}
- /* clear background bit in thread and disable disk IO throttle */
- ut->uu_flag &= ~UT_BACKGROUND;
+ /*
+ * Clear background bit in thread and disable disk IO
+ * throttle as well as network traffic management.
+ * The corresponding socket flags for sockets created by
+ * this thread will be cleared in do_background_socket().
+ */
+ ut->uu_flag &= ~(UT_BACKGROUND | UT_BACKGROUND_TRAFFIC_MGT);
ut->uu_iopol_disk = IOPOL_NORMAL;
/* reset thread priority (we did not save previous value) */
return(0);
}
- /* tag thread as background and throttle disk IO */
- ut->uu_flag |= UT_BACKGROUND;
+ /*
+ * Tag thread as background and throttle disk IO, as well
+ * as regulate network traffics. Future sockets created
+ * by this thread will have their corresponding socket
+ * flags set at socket create time.
+ */
+ ut->uu_flag |= (UT_BACKGROUND | UT_BACKGROUND_TRAFFIC_MGT);
ut->uu_iopol_disk = IOPOL_THROTTLE;
policy.importance = INT_MIN;
thread_policy_set( thread, THREAD_PRECEDENCE_POLICY,
(thread_policy_t)&policy,
THREAD_PRECEDENCE_POLICY_COUNT );
-
+
/* throttle networking IO happens in socket( ) syscall.
- * If UT_BACKGROUND is set in the current thread then
- * TRAFFIC_MGT_SO_BACKGROUND socket option is set.
+ * If UT_{BACKGROUND,BACKGROUND_TRAFFIC_MGT} is set in the current
+ * thread then TRAFFIC_MGT_SO_{BACKGROUND,BG_REGULATE} is set.
+ * Existing sockets are taken care of by do_background_socket().
*/
return(0);
}
+/*
+ * If the thread or its proc has been put into the background
+ * with setpriority(PRIO_DARWIN_{THREAD,PROCESS}, *, PRIO_DARWIN_BG),
+ * report that status.
+ *
+ * uth: the uthread to inspect; its uu_proc may be NULL, in which
+ * case only the per-thread UT_BACKGROUND flag is consulted.
+ *
+ * Returns: PRIO_DARWIN_BG if background
+ * 0 if foreground
+ */
+int
+uthread_get_background_state(uthread_t uth)
+{
+	proc_t p = uth->uu_proc;
+	/* process-wide background flag takes effect first */
+	if (p && (p->p_lflag & P_LBACKGROUND))
+		return PRIO_DARWIN_BG;
+
+	/* then the per-thread background flag */
+	if (uth->uu_flag & UT_BACKGROUND)
+		return PRIO_DARWIN_BG;
+
+	return 0;
+}
/*
* Returns: 0 Success
int policy;
struct uthread *ut;
- policy = current_proc()->p_iopol_disk;
-
ut = get_bsdthread_info(current_thread());
- if (ut->uu_iopol_disk != IOPOL_DEFAULT)
- policy = ut->uu_iopol_disk;
+ if(ut){
+ policy = current_proc()->p_iopol_disk;
- if (policy == IOPOL_THROTTLE)
- return TRUE;
+ if (ut->uu_iopol_disk != IOPOL_DEFAULT)
+ policy = ut->uu_iopol_disk;
+ if (policy == IOPOL_THROTTLE)
+ return TRUE;
+ }
return FALSE;
}
{
return (_MHLEN);
}
+
+/*
+ * Return the traffic priority tag of an mbuf.
+ *
+ * When the kernel is built without PKT_PRIORITY this always reports
+ * MBUF_PRIORITY_NORMAL.  Otherwise the pkthdr's prio field is consulted,
+ * but the result is clamped to the two supported values: anything other
+ * than MBUF_PRIORITY_BACKGROUND (including a NULL mbuf or one without a
+ * packet header) is reported as MBUF_PRIORITY_NORMAL.
+ */
+mbuf_priority_t
+mbuf_get_priority(struct mbuf *m)
+{
+#if !PKT_PRIORITY
+#pragma unused(m)
+ return (MBUF_PRIORITY_NORMAL);
+#else /* PKT_PRIORITY */
+ mbuf_priority_t prio = MBUF_PRIORITY_NORMAL;
+
+ /* only mbufs with a packet header carry a priority tag */
+ if (m == NULL || !(m->m_flags & M_PKTHDR))
+ return (prio);
+
+ /* Defaults to normal; ignore anything else but background */
+ if (m->m_pkthdr.prio == MBUF_PRIORITY_BACKGROUND)
+ prio = MBUF_PRIORITY_BACKGROUND;
+
+ return (prio);
+#endif /* PKT_PRIORITY */
+}
#include <sys/uio_internal.h>
#include <kern/lock.h>
#include <netinet/in.h>
+#include <libkern/OSAtomic.h>
extern int soclose_locked(struct socket *so);
extern void soclose_wait_locked(struct socket *so);
{
return (sock->so_head);
}
+
+/*
+ * Atomically OR the given TRAFFIC_MGT_* bits into the socket's
+ * so_traffic_mgt_flags (via OSBitOrAtomic, so no socket lock is needed
+ * for the update itself).
+ *
+ * Caller must have ensured socket is valid and won't be going away.
+ */
+void
+socket_set_traffic_mgt_flags(socket_t sock, u_int32_t flags)
+{
+ (void) OSBitOrAtomic(flags, &sock->so_traffic_mgt_flags);
+}
+
+/*
+ * Atomically clear the given TRAFFIC_MGT_* bits from the socket's
+ * so_traffic_mgt_flags (AND with the complement via OSBitAndAtomic,
+ * so no socket lock is needed for the update itself).
+ *
+ * Caller must have ensured socket is valid and won't be going away.
+ */
+void
+socket_clear_traffic_mgt_flags(socket_t sock, u_int32_t flags)
+{
+ (void) OSBitAndAtomic(~flags, &sock->so_traffic_mgt_flags);
+}
else if (returntype == "uint32_t") {
munge_ret = "_SYSCALL_RET_UINT_T"
}
+ else if (returntype == "uint64_t") {
+ munge_ret = "_SYSCALL_RET_UINT64_T"
+ }
else if (returntype == "off_t") {
munge_ret = "_SYSCALL_RET_OFF_T"
}
return(error);
}
-int thread_selfid(__unused struct proc *p, __unused struct thread_selfid_args *uap, user_addr_t *retval)
+int thread_selfid(__unused struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
- thread_t thread = current_thread();
- uint64_t thread_id = thread_tid(thread);
- *retval = thread_id;
+ thread_t thread = current_thread();
+ *retval = thread_tid(thread);
return KERN_SUCCESS;
}
370 AUE_NULL ALL { int nosys(void); } { old __semwait_signal }
371 AUE_NULL ALL { int nosys(void); } { old __semwait_signal }
#endif
-372 AUE_NULL ALL { user_addr_t thread_selfid (void) NO_SYSCALL_STUB; }
+372 AUE_NULL ALL { uint64_t thread_selfid (void) NO_SYSCALL_STUB; }
373 AUE_NULL ALL { int nosys(void); }
374 AUE_NULL ALL { int nosys(void); }
375 AUE_NULL ALL { int nosys(void); }
427 AUE_FSGETPATH ALL { user_ssize_t fsgetpath(user_addr_t buf, size_t bufsize, user_addr_t fsid, uint64_t objid) NO_SYSCALL_STUB; } { private fsgetpath (File Manager SPI) }
428 AUE_NULL ALL { mach_port_name_t audit_session_self(void); }
429 AUE_NULL ALL { int audit_session_join(mach_port_name_t port); }
+430 AUE_NULL ALL { int pid_suspend(int pid); }
+431 AUE_NULL ALL { int pid_resume(int pid); }
+#if CONFIG_EMBEDDED
+432 AUE_NULL ALL { int fileport_makeport(int fd, user_addr_t portnamep); }
+433 AUE_NULL ALL { int fileport_makefd(mach_port_name_t port); }
+#else
+432 AUE_NULL ALL { int nosys(void); }
+433 AUE_NULL ALL { int nosys(void); }
+#endif
/*
- * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
for (pr = dp->dom_protosw; pr; pr = pr->pr_next) {
if (pr->pr_slowtimo)
(*pr->pr_slowtimo)();
- if (do_reclaim && pr->pr_drain)
+ if ((do_reclaim || (pr->pr_flags & PR_AGGDRAIN)) &&
+ pr->pr_drain)
(*pr->pr_drain)();
}
do_reclaim = 0;
lck_mtx_unlock(domain_proto_mtx);
timeout(pfslowtimo, NULL, hz/PR_SLOWHZ);
-
}
void
(m)->m_pkthdr.vlan_tag = 0; \
(m)->m_pkthdr.socket_id = 0; \
m_tag_init(m); \
+ m_prio_init(m); \
} \
}
m_tag_delete_chain(to, NULL);
to->m_pkthdr = from->m_pkthdr; /* especially tags */
m_tag_init(from); /* purge tags from src */
+ m_prio_init(from); /* reset priority from src */
to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
if ((to->m_flags & M_EXT) == 0)
to->m_data = to->m_pktdat;
{
return SLIST_NEXT(t, m_tag_link);
}
+
+/*
+ * Reset an mbuf's packet priority to MBUF_PRIORITY_NORMAL.
+ *
+ * No-op when the kernel is built without PKT_PRIORITY, or when the
+ * mbuf carries no packet header (only pkthdr mbufs have a prio field).
+ * Called from pkthdr initialization and when tags/priority are purged
+ * from a source mbuf.
+ */
+void
+m_prio_init(struct mbuf *m)
+{
+#if !PKT_PRIORITY
+#pragma unused(m)
+#else /* PKT_PRIORITY */
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.prio = MBUF_PRIORITY_NORMAL;
+#endif /* PKT_PRIORITY */
+}
+
+/*
+ * Mark an mbuf's packet priority as MBUF_PRIORITY_BACKGROUND.
+ *
+ * No-op when the kernel is built without PKT_PRIORITY, or when the
+ * mbuf carries no packet header (only pkthdr mbufs have a prio field).
+ */
+void
+m_prio_background(struct mbuf *m)
+{
+#if !PKT_PRIORITY
+#pragma unused(m)
+#else /* PKT_PRIORITY */
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.prio = MBUF_PRIORITY_BACKGROUND;
+#endif /* PKT_PRIORITY */
+}
#include <sys/ev.h>
#include <sys/kdebug.h>
#include <sys/un.h>
+#include <sys/user.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
extern int soclose_locked(struct socket *);
extern int soo_kqfilter(struct fileproc *, struct knote *, struct proc *);
+extern int uthread_get_background_state(uthread_t);
+
#ifdef __APPLE__
vm_size_t so_cache_zone_element_size;
register struct protosw *prp;
register struct socket *so;
register int error = 0;
+ thread_t thread;
+ struct uthread *ut;
+
#if TCPDEBUG
extern int tcpconsdebug;
#endif
so->so_options |= SO_DEBUG;
#endif
#endif
+ /*
+ * If this is a background thread/task, mark the socket as such.
+ */
+ thread = current_thread();
+ ut = get_bsdthread_info(thread);
+ if (uthread_get_background_state(ut)) {
+ socket_set_traffic_mgt_flags(so, TRAFFIC_MGT_SO_BACKGROUND);
+ so->so_background_thread = thread;
+ /*
+ * In case setpriority(PRIO_DARWIN_THREAD) was called
+ * on this thread, regulate network (TCP) traffics.
+ */
+ if (ut->uu_flag & UT_BACKGROUND_TRAFFIC_MGT) {
+ socket_set_traffic_mgt_flags(so,
+ TRAFFIC_MGT_SO_BG_REGULATE);
+ }
+ }
+
*aso = so;
return (0);
}
mac_socket_label_associate_accept(head, so);
#endif
+ /* inherit traffic management properties of listener */
+ so->so_traffic_mgt_flags = head->so_traffic_mgt_flags &
+ (TRAFFIC_MGT_SO_BACKGROUND | TRAFFIC_MGT_SO_BG_REGULATE);
+ so->so_background_thread = head->so_background_thread;
+
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
sflt_termsock(so);
sodealloc(so);
xsb->sb_timeo = 1;
}
+/*
+ * Return non-zero iff the socket has been tagged as background
+ * (TRAFFIC_MGT_SO_BACKGROUND set in so_traffic_mgt_flags), e.g. because
+ * it was created by, or inherited from a listener owned by, a thread or
+ * process placed in the background via setpriority(PRIO_DARWIN_*).
+ */
+int
+soisbackground(struct socket *so)
+{
+ return (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND);
+}
+
/*
* Here is the definition of some of the basic objects in the kern.ipc
* branch of the MIB.
if (error) {
fp_free(p, fd, fp);
} else {
- thread_t thread;
- struct uthread *ut;
-
- thread = current_thread();
- ut = get_bsdthread_info(thread);
-
- /* if this is a backgrounded thread then throttle all new sockets */
- if ( (ut->uu_flag & UT_BACKGROUND) != 0 ) {
- so->so_traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
- so->so_background_thread = thread;
- }
fp->f_data = (caddr_t)so;
proc_fdlock(p);
bpf.h dlil.h \
ethernet.h if.h if_arp.h \
if_dl.h if_llc.h if_media.h if_mib.h \
- if_types.h if_var.h \
+ if_types.h if_utun.h if_var.h \
kext_net.h ndrv.h pfkeyv2.h \
route.h
+++ /dev/null
-/*
- * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-/* $fpwf: Revision 1.2 2007/05/17 03:38:46 rnewberry Exp $ */
-/* $NetBSD: bridgestp.c,v 1.10 2006/11/16 01:33:40 christos Exp $ */
-
-/*
- * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Jason L. Wright
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * OpenBSD: bridgestp.c,v 1.5 2001/03/22 03:48:29 jason Exp
- */
-
-/*
- * Implementation of the spanning tree protocol as defined in
- * ISO/IEC Final DIS 15802-3 (IEEE P802.1D/D17), May 25, 1998.
- * (In English: IEEE 802.1D, Draft 17, 1998)
- */
-
-/* $NetBSD: if_bridgevar.h,v 1.8 2005/12/10 23:21:38 elad Exp $ */
-
-#include <sys/cdefs.h>
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/mbuf.h>
-#include <sys/socket.h>
-#include <sys/ioctl.h>
-#include <sys/kernel.h>
-#include <sys/callout.h>
-
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/if_types.h>
-#include <net/if_llc.h>
-
-#include <net/if_ether.h>
-#include <net/if_bridgevar.h>
-#include <net/if_media.h>
-
-#include <net/kpi_interface.h>
-
-/* BPDU message types */
-#define BSTP_MSGTYPE_CFG 0x00 /* Configuration */
-#define BSTP_MSGTYPE_TCN 0x80 /* Topology chg notification */
-
-/* BPDU flags */
-#define BSTP_FLAG_TC 0x01 /* Topology change */
-#define BSTP_FLAG_TCA 0x80 /* Topology change ack */
-
-#define BSTP_MESSAGE_AGE_INCR (1 * 256) /* in 256ths of a second */
-#define BSTP_TICK_VAL (1 * 256) /* in 256ths of a second */
-
-/*
- * Because BPDU's do not make nicely aligned structures, two different
- * declarations are used: bstp_?bpdu (wire representation, packed) and
- * bstp_*_unit (internal, nicely aligned version).
- */
-
-/* configuration bridge protocol data unit */
-struct bstp_cbpdu {
- uint8_t cbu_dsap; /* LLC: destination sap */
- uint8_t cbu_ssap; /* LLC: source sap */
- uint8_t cbu_ctl; /* LLC: control */
- uint16_t cbu_protoid; /* protocol id */
- uint8_t cbu_protover; /* protocol version */
- uint8_t cbu_bpdutype; /* message type */
- uint8_t cbu_flags; /* flags (below) */
-
- /* root id */
- uint16_t cbu_rootpri; /* root priority */
- uint8_t cbu_rootaddr[6]; /* root address */
-
- uint32_t cbu_rootpathcost; /* root path cost */
-
- /* bridge id */
- uint16_t cbu_bridgepri; /* bridge priority */
- uint8_t cbu_bridgeaddr[6]; /* bridge address */
-
- uint16_t cbu_portid; /* port id */
- uint16_t cbu_messageage; /* current message age */
- uint16_t cbu_maxage; /* maximum age */
- uint16_t cbu_hellotime; /* hello time */
- uint16_t cbu_forwarddelay; /* forwarding delay */
-} __attribute__((__packed__));
-
-/* topology change notification bridge protocol data unit */
-struct bstp_tbpdu {
- uint8_t tbu_dsap; /* LLC: destination sap */
- uint8_t tbu_ssap; /* LLC: source sap */
- uint8_t tbu_ctl; /* LLC: control */
- uint16_t tbu_protoid; /* protocol id */
- uint8_t tbu_protover; /* protocol version */
- uint8_t tbu_bpdutype; /* message type */
-} __attribute__((__packed__));
-
-const uint8_t bstp_etheraddr[] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-
-void bstp_initialize_port(struct bridge_softc *, struct bridge_iflist *);
-void bstp_ifupdstatus(struct bridge_softc *, struct bridge_iflist *);
-void bstp_enable_port(struct bridge_softc *, struct bridge_iflist *);
-void bstp_disable_port(struct bridge_softc *, struct bridge_iflist *);
-void bstp_enable_change_detection(struct bridge_iflist *);
-void bstp_disable_change_detection(struct bridge_iflist *);
-int bstp_root_bridge(struct bridge_softc *sc);
-int bstp_supersedes_port_info(struct bridge_softc *,
- struct bridge_iflist *, struct bstp_config_unit *);
-int bstp_designated_port(struct bridge_softc *, struct bridge_iflist *);
-int bstp_designated_for_some_port(struct bridge_softc *);
-void bstp_transmit_config(struct bridge_softc *, struct bridge_iflist *);
-void bstp_transmit_tcn(struct bridge_softc *);
-void bstp_received_config_bpdu(struct bridge_softc *,
- struct bridge_iflist *, struct bstp_config_unit *);
-void bstp_received_tcn_bpdu(struct bridge_softc *, struct bridge_iflist *,
- struct bstp_tcn_unit *);
-void bstp_record_config_information(struct bridge_softc *,
- struct bridge_iflist *, struct bstp_config_unit *);
-void bstp_record_config_timeout_values(struct bridge_softc *,
- struct bstp_config_unit *);
-void bstp_config_bpdu_generation(struct bridge_softc *);
-void bstp_send_config_bpdu(struct bridge_softc *, struct bridge_iflist *,
- struct bstp_config_unit *);
-void bstp_configuration_update(struct bridge_softc *);
-void bstp_root_selection(struct bridge_softc *);
-void bstp_designated_port_selection(struct bridge_softc *);
-void bstp_become_designated_port(struct bridge_softc *,
- struct bridge_iflist *);
-void bstp_port_state_selection(struct bridge_softc *);
-void bstp_make_forwarding(struct bridge_softc *, struct bridge_iflist *);
-void bstp_make_blocking(struct bridge_softc *, struct bridge_iflist *);
-void bstp_set_port_state(struct bridge_iflist *, uint8_t);
-void bstp_set_bridge_priority(struct bridge_softc *, uint64_t);
-void bstp_set_port_priority(struct bridge_softc *, struct bridge_iflist *,
- uint16_t);
-void bstp_set_path_cost(struct bridge_softc *, struct bridge_iflist *,
- uint32_t);
-void bstp_topology_change_detection(struct bridge_softc *);
-void bstp_topology_change_acknowledged(struct bridge_softc *);
-void bstp_acknowledge_topology_change(struct bridge_softc *,
- struct bridge_iflist *);
-
-void bstp_tick(void *);
-void bstp_timer_start(struct bridge_timer *, uint16_t);
-void bstp_timer_stop(struct bridge_timer *);
-int bstp_timer_expired(struct bridge_timer *, uint16_t);
-
-void bstp_hold_timer_expiry(struct bridge_softc *, struct bridge_iflist *);
-void bstp_message_age_timer_expiry(struct bridge_softc *,
- struct bridge_iflist *);
-void bstp_forward_delay_timer_expiry(struct bridge_softc *,
- struct bridge_iflist *);
-void bstp_topology_change_timer_expiry(struct bridge_softc *);
-void bstp_tcn_timer_expiry(struct bridge_softc *);
-void bstp_hello_timer_expiry(struct bridge_softc *);
-
-void
-bstp_transmit_config(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- if (bif->bif_hold_timer.active) {
- bif->bif_config_pending = 1;
- return;
- }
-
- bif->bif_config_bpdu.cu_message_type = BSTP_MSGTYPE_CFG;
- bif->bif_config_bpdu.cu_rootid = sc->sc_designated_root;
- bif->bif_config_bpdu.cu_root_path_cost = sc->sc_root_path_cost;
- bif->bif_config_bpdu.cu_bridge_id = sc->sc_bridge_id;
- bif->bif_config_bpdu.cu_port_id = bif->bif_port_id;
-
- if (bstp_root_bridge(sc))
- bif->bif_config_bpdu.cu_message_age = 0;
- else
- bif->bif_config_bpdu.cu_message_age =
- sc->sc_root_port->bif_message_age_timer.value +
- BSTP_MESSAGE_AGE_INCR;
-
- bif->bif_config_bpdu.cu_max_age = sc->sc_max_age;
- bif->bif_config_bpdu.cu_hello_time = sc->sc_hello_time;
- bif->bif_config_bpdu.cu_forward_delay = sc->sc_forward_delay;
- bif->bif_config_bpdu.cu_topology_change_acknowledgment
- = bif->bif_topology_change_acknowledge;
- bif->bif_config_bpdu.cu_topology_change = sc->sc_topology_change;
-
- if (bif->bif_config_bpdu.cu_message_age < sc->sc_max_age) {
- bif->bif_topology_change_acknowledge = 0;
- bif->bif_config_pending = 0;
- bstp_send_config_bpdu(sc, bif, &bif->bif_config_bpdu);
- bstp_timer_start(&bif->bif_hold_timer, 0);
- }
-}
-
-void
-bstp_send_config_bpdu(struct bridge_softc *sc, struct bridge_iflist *bif,
- struct bstp_config_unit *cu)
-{
- struct ifnet *ifp;
- struct mbuf *m;
- struct ether_header *eh;
- struct bstp_cbpdu bpdu;
-
- ifp = bif->bif_ifp;
-
- if ((ifp->if_flags & IFF_RUNNING) == 0)
- return;
-
- MGETHDR(m, M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return;
-
- eh = mtod(m, struct ether_header *);
-
- m->m_pkthdr.rcvif = ifp;
- m->m_pkthdr.len = sizeof(*eh) + sizeof(bpdu);
- m->m_len = m->m_pkthdr.len;
-
- bpdu.cbu_ssap = bpdu.cbu_dsap = LLC_8021D_LSAP;
- bpdu.cbu_ctl = LLC_UI;
- bpdu.cbu_protoid = htons(0);
- bpdu.cbu_protover = 0;
- bpdu.cbu_bpdutype = cu->cu_message_type;
- bpdu.cbu_flags = (cu->cu_topology_change ? BSTP_FLAG_TC : 0) |
- (cu->cu_topology_change_acknowledgment ? BSTP_FLAG_TCA : 0);
-
- bpdu.cbu_rootpri = htons(cu->cu_rootid >> 48);
- bpdu.cbu_rootaddr[0] = cu->cu_rootid >> 40;
- bpdu.cbu_rootaddr[1] = cu->cu_rootid >> 32;
- bpdu.cbu_rootaddr[2] = cu->cu_rootid >> 24;
- bpdu.cbu_rootaddr[3] = cu->cu_rootid >> 16;
- bpdu.cbu_rootaddr[4] = cu->cu_rootid >> 8;
- bpdu.cbu_rootaddr[5] = cu->cu_rootid >> 0;
-
- bpdu.cbu_rootpathcost = htonl(cu->cu_root_path_cost);
-
- bpdu.cbu_bridgepri = htons(cu->cu_rootid >> 48);
- bpdu.cbu_bridgeaddr[0] = cu->cu_rootid >> 40;
- bpdu.cbu_bridgeaddr[1] = cu->cu_rootid >> 32;
- bpdu.cbu_bridgeaddr[2] = cu->cu_rootid >> 24;
- bpdu.cbu_bridgeaddr[3] = cu->cu_rootid >> 16;
- bpdu.cbu_bridgeaddr[4] = cu->cu_rootid >> 8;
- bpdu.cbu_bridgeaddr[5] = cu->cu_rootid >> 0;
-
- bpdu.cbu_portid = htons(cu->cu_port_id);
- bpdu.cbu_messageage = htons(cu->cu_message_age);
- bpdu.cbu_maxage = htons(cu->cu_max_age);
- bpdu.cbu_hellotime = htons(cu->cu_hello_time);
- bpdu.cbu_forwarddelay = htons(cu->cu_forward_delay);
-
- memcpy(eh->ether_shost, ifnet_lladdr(ifp), ETHER_ADDR_LEN);
- memcpy(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN);
- eh->ether_type = htons(sizeof(bpdu));
-
- memcpy(mtod(m, caddr_t) + sizeof(*eh), &bpdu, sizeof(bpdu));
-
- bridge_enqueue(sc, ifp, m); // APPLE MODIFICATION - no flags param
-}
-
-int
-bstp_root_bridge(struct bridge_softc *sc)
-{
- return (sc->sc_designated_root == sc->sc_bridge_id);
-}
-
-int
-bstp_supersedes_port_info(struct bridge_softc *sc, struct bridge_iflist *bif,
- struct bstp_config_unit *cu)
-{
- if (cu->cu_rootid < bif->bif_designated_root)
- return (1);
- if (cu->cu_rootid > bif->bif_designated_root)
- return (0);
-
- if (cu->cu_root_path_cost < bif->bif_designated_cost)
- return (1);
- if (cu->cu_root_path_cost > bif->bif_designated_cost)
- return (0);
-
- if (cu->cu_bridge_id < bif->bif_designated_bridge)
- return (1);
- if (cu->cu_bridge_id > bif->bif_designated_bridge)
- return (0);
-
- if (sc->sc_bridge_id != cu->cu_bridge_id)
- return (1);
- if (cu->cu_port_id <= bif->bif_designated_port)
- return (1);
- return (0);
-}
-
-void
-bstp_record_config_information(__unused struct bridge_softc *sc,
- struct bridge_iflist *bif, struct bstp_config_unit *cu)
-{
- bif->bif_designated_root = cu->cu_rootid;
- bif->bif_designated_cost = cu->cu_root_path_cost;
- bif->bif_designated_bridge = cu->cu_bridge_id;
- bif->bif_designated_port = cu->cu_port_id;
- bstp_timer_start(&bif->bif_message_age_timer, cu->cu_message_age);
-}
-
-void
-bstp_record_config_timeout_values(struct bridge_softc *sc,
- struct bstp_config_unit *config)
-{
- sc->sc_max_age = config->cu_max_age;
- sc->sc_hello_time = config->cu_hello_time;
- sc->sc_forward_delay = config->cu_forward_delay;
- sc->sc_topology_change = config->cu_topology_change;
-}
-
-void
-bstp_config_bpdu_generation(struct bridge_softc *sc)
-{
- struct bridge_iflist *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_designated_port(sc, bif) &&
- (bif->bif_state != BSTP_IFSTATE_DISABLED))
- bstp_transmit_config(sc, bif);
- }
-}
-
-int
-bstp_designated_port(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- return ((bif->bif_designated_bridge == sc->sc_bridge_id)
- && (bif->bif_designated_port == bif->bif_port_id));
-}
-
-void
-bstp_transmit_tcn(struct bridge_softc *sc)
-{
- struct bstp_tbpdu bpdu;
- struct bridge_iflist *bif = sc->sc_root_port;
- struct ifnet *ifp;
- struct ether_header *eh;
- struct mbuf *m;
-
- KASSERT(bif != NULL, "bstp_transmit_tcn bif NULL");
- ifp = bif->bif_ifp;
- if ((ifp->if_flags & IFF_RUNNING) == 0)
- return;
-
- MGETHDR(m, M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return;
-
- m->m_pkthdr.rcvif = ifp;
- m->m_pkthdr.len = sizeof(*eh) + sizeof(bpdu);
- m->m_len = m->m_pkthdr.len;
-
- eh = mtod(m, struct ether_header *);
-
- memcpy(eh->ether_shost, ifnet_lladdr(ifp), ETHER_ADDR_LEN);
- memcpy(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN);
- eh->ether_type = htons(sizeof(bpdu));
-
- bpdu.tbu_ssap = bpdu.tbu_dsap = LLC_8021D_LSAP;
- bpdu.tbu_ctl = LLC_UI;
- bpdu.tbu_protoid = 0;
- bpdu.tbu_protover = 0;
- bpdu.tbu_bpdutype = BSTP_MSGTYPE_TCN;
-
- memcpy(mtod(m, caddr_t) + sizeof(*eh), &bpdu, sizeof(bpdu));
-
- bridge_enqueue(sc, ifp, m); // APPLE MODIFICATION - no flags param
-}
-
-void
-bstp_configuration_update(struct bridge_softc *sc)
-{
- bstp_root_selection(sc);
- bstp_designated_port_selection(sc);
-}
-
-void
-bstp_root_selection(struct bridge_softc *sc)
-{
- struct bridge_iflist *root_port = NULL, *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_designated_port(sc, bif))
- continue;
- if (bif->bif_state == BSTP_IFSTATE_DISABLED)
- continue;
- if (bif->bif_designated_root >= sc->sc_bridge_id)
- continue;
- if (root_port == NULL)
- goto set_port;
-
- if (bif->bif_designated_root < root_port->bif_designated_root)
- goto set_port;
- if (bif->bif_designated_root > root_port->bif_designated_root)
- continue;
-
- if ((bif->bif_designated_cost + bif->bif_path_cost) <
- (root_port->bif_designated_cost + root_port->bif_path_cost))
- goto set_port;
- if ((bif->bif_designated_cost + bif->bif_path_cost) >
- (root_port->bif_designated_cost + root_port->bif_path_cost))
- continue;
-
- if (bif->bif_designated_bridge <
- root_port->bif_designated_bridge)
- goto set_port;
- if (bif->bif_designated_bridge >
- root_port->bif_designated_bridge)
- continue;
-
- if (bif->bif_designated_port < root_port->bif_designated_port)
- goto set_port;
- if (bif->bif_designated_port > root_port->bif_designated_port)
- continue;
-
- if (bif->bif_port_id >= root_port->bif_port_id)
- continue;
-set_port:
- root_port = bif;
- }
-
- sc->sc_root_port = root_port;
- if (root_port == NULL) {
- sc->sc_designated_root = sc->sc_bridge_id;
- sc->sc_root_path_cost = 0;
- } else {
- sc->sc_designated_root = root_port->bif_designated_root;
- sc->sc_root_path_cost = root_port->bif_designated_cost +
- root_port->bif_path_cost;
- }
-}
-
-void
-bstp_designated_port_selection(struct bridge_softc *sc)
-{
- struct bridge_iflist *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_designated_port(sc, bif))
- goto designated;
- if (bif->bif_designated_root != sc->sc_designated_root)
- goto designated;
-
- if (sc->sc_root_path_cost < bif->bif_designated_cost)
- goto designated;
- if (sc->sc_root_path_cost > bif->bif_designated_cost)
- continue;
-
- if (sc->sc_bridge_id < bif->bif_designated_bridge)
- goto designated;
- if (sc->sc_bridge_id > bif->bif_designated_bridge)
- continue;
-
- if (bif->bif_port_id > bif->bif_designated_port)
- continue;
-designated:
- bstp_become_designated_port(sc, bif);
- }
-}
-
-void
-bstp_become_designated_port(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- bif->bif_designated_root = sc->sc_designated_root;
- bif->bif_designated_cost = sc->sc_root_path_cost;
- bif->bif_designated_bridge = sc->sc_bridge_id;
- bif->bif_designated_port = bif->bif_port_id;
-}
-
-void
-bstp_port_state_selection(struct bridge_softc *sc)
-{
- struct bridge_iflist *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bif == sc->sc_root_port) {
- bif->bif_config_pending = 0;
- bif->bif_topology_change_acknowledge = 0;
- bstp_make_forwarding(sc, bif);
- } else if (bstp_designated_port(sc, bif)) {
- bstp_timer_stop(&bif->bif_message_age_timer);
- bstp_make_forwarding(sc, bif);
- } else {
- bif->bif_config_pending = 0;
- bif->bif_topology_change_acknowledge = 0;
- bstp_make_blocking(sc, bif);
- }
- }
-}
-
-void
-bstp_make_forwarding(__unused struct bridge_softc *sc,
- struct bridge_iflist *bif)
-{
- if (bif->bif_state == BSTP_IFSTATE_BLOCKING) {
- bstp_set_port_state(bif, BSTP_IFSTATE_LISTENING);
- bstp_timer_start(&bif->bif_forward_delay_timer, 0);
- }
-}
-
-void
-bstp_make_blocking(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- if ((bif->bif_state != BSTP_IFSTATE_DISABLED) &&
- (bif->bif_state != BSTP_IFSTATE_BLOCKING)) {
- if ((bif->bif_state == BSTP_IFSTATE_FORWARDING) ||
- (bif->bif_state == BSTP_IFSTATE_LEARNING)) {
- if (bif->bif_change_detection_enabled) {
- bstp_topology_change_detection(sc);
- }
- }
- bstp_set_port_state(bif, BSTP_IFSTATE_BLOCKING);
- bstp_timer_stop(&bif->bif_forward_delay_timer);
- }
-}
-
-void
-bstp_set_port_state(struct bridge_iflist *bif, uint8_t state)
-{
- bif->bif_state = state;
-}
-
-void
-bstp_topology_change_detection(struct bridge_softc *sc)
-{
- if (bstp_root_bridge(sc)) {
- sc->sc_topology_change = 1;
- bstp_timer_start(&sc->sc_topology_change_timer, 0);
- } else if (!sc->sc_topology_change_detected) {
- bstp_transmit_tcn(sc);
- bstp_timer_start(&sc->sc_tcn_timer, 0);
- }
- sc->sc_topology_change_detected = 1;
-}
-
-void
-bstp_topology_change_acknowledged(struct bridge_softc *sc)
-{
- sc->sc_topology_change_detected = 0;
- bstp_timer_stop(&sc->sc_tcn_timer);
-}
-
-void
-bstp_acknowledge_topology_change(struct bridge_softc *sc,
- struct bridge_iflist *bif)
-{
- bif->bif_topology_change_acknowledge = 1;
- bstp_transmit_config(sc, bif);
-}
-
-__private_extern__ struct mbuf *
-bstp_input(struct bridge_softc *sc, struct ifnet *ifp, struct mbuf *m)
-{
- struct bridge_iflist *bif = NULL;
- struct ether_header *eh;
- struct bstp_tbpdu tpdu;
- struct bstp_cbpdu cpdu;
- struct bstp_config_unit cu;
- struct bstp_tcn_unit tu;
- uint16_t len;
-
- eh = mtod(m, struct ether_header *);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bif->bif_ifp == ifp)
- break;
- }
- if (bif == NULL)
- goto out;
-
- len = ntohs(eh->ether_type);
- if (len < sizeof(tpdu))
- goto out;
-
- m_adj(m, ETHER_HDR_LEN);
-
- if (m->m_pkthdr.len > len)
- m_adj(m, len - m->m_pkthdr.len);
- if ((size_t)m->m_len < sizeof(tpdu) &&
- (m = m_pullup(m, sizeof(tpdu))) == NULL)
- goto out;
-
- memcpy(&tpdu, mtod(m, caddr_t), sizeof(tpdu));
-
- if (tpdu.tbu_dsap != LLC_8021D_LSAP ||
- tpdu.tbu_ssap != LLC_8021D_LSAP ||
- tpdu.tbu_ctl != LLC_UI)
- goto out;
- if (tpdu.tbu_protoid != 0 || tpdu.tbu_protover != 0)
- goto out;
-
- switch (tpdu.tbu_bpdutype) {
- case BSTP_MSGTYPE_TCN:
- tu.tu_message_type = tpdu.tbu_bpdutype;
- bstp_received_tcn_bpdu(sc, bif, &tu);
- break;
- case BSTP_MSGTYPE_CFG:
- if ((size_t)m->m_len < sizeof(cpdu) &&
- (m = m_pullup(m, sizeof(cpdu))) == NULL)
- goto out;
- memcpy(&cpdu, mtod(m, caddr_t), sizeof(cpdu));
-
- cu.cu_rootid =
- (((uint64_t)ntohs(cpdu.cbu_rootpri)) << 48) |
- (((uint64_t)cpdu.cbu_rootaddr[0]) << 40) |
- (((uint64_t)cpdu.cbu_rootaddr[1]) << 32) |
- (((uint64_t)cpdu.cbu_rootaddr[2]) << 24) |
- (((uint64_t)cpdu.cbu_rootaddr[3]) << 16) |
- (((uint64_t)cpdu.cbu_rootaddr[4]) << 8) |
- (((uint64_t)cpdu.cbu_rootaddr[5]) << 0);
-
- cu.cu_bridge_id =
- (((uint64_t)ntohs(cpdu.cbu_bridgepri)) << 48) |
- (((uint64_t)cpdu.cbu_bridgeaddr[0]) << 40) |
- (((uint64_t)cpdu.cbu_bridgeaddr[1]) << 32) |
- (((uint64_t)cpdu.cbu_bridgeaddr[2]) << 24) |
- (((uint64_t)cpdu.cbu_bridgeaddr[3]) << 16) |
- (((uint64_t)cpdu.cbu_bridgeaddr[4]) << 8) |
- (((uint64_t)cpdu.cbu_bridgeaddr[5]) << 0);
-
- cu.cu_root_path_cost = ntohl(cpdu.cbu_rootpathcost);
- cu.cu_message_age = ntohs(cpdu.cbu_messageage);
- cu.cu_max_age = ntohs(cpdu.cbu_maxage);
- cu.cu_hello_time = ntohs(cpdu.cbu_hellotime);
- cu.cu_forward_delay = ntohs(cpdu.cbu_forwarddelay);
- cu.cu_port_id = ntohs(cpdu.cbu_portid);
- cu.cu_message_type = cpdu.cbu_bpdutype;
- cu.cu_topology_change_acknowledgment =
- (cpdu.cbu_flags & BSTP_FLAG_TCA) ? 1 : 0;
- cu.cu_topology_change =
- (cpdu.cbu_flags & BSTP_FLAG_TC) ? 1 : 0;
- bstp_received_config_bpdu(sc, bif, &cu);
- break;
- default:
- goto out;
- }
-
- out:
- if (m)
- m_freem(m);
- return (NULL);
-}
-
-void
-bstp_received_config_bpdu(struct bridge_softc *sc, struct bridge_iflist *bif,
- struct bstp_config_unit *cu)
-{
- int root;
-
- root = bstp_root_bridge(sc);
-
- if (bif->bif_state != BSTP_IFSTATE_DISABLED) {
- if (bstp_supersedes_port_info(sc, bif, cu)) {
- bstp_record_config_information(sc, bif, cu);
- bstp_configuration_update(sc);
- bstp_port_state_selection(sc);
-
- if ((bstp_root_bridge(sc) == 0) && root) {
- bstp_timer_stop(&sc->sc_hello_timer);
-
- if (sc->sc_topology_change_detected) {
- bstp_timer_stop(
- &sc->sc_topology_change_timer);
- bstp_transmit_tcn(sc);
- bstp_timer_start(&sc->sc_tcn_timer, 0);
- }
- }
-
- if (bif == sc->sc_root_port) {
- bstp_record_config_timeout_values(sc, cu);
- bstp_config_bpdu_generation(sc);
-
- if (cu->cu_topology_change_acknowledgment)
- bstp_topology_change_acknowledged(sc);
- }
- } else if (bstp_designated_port(sc, bif))
- bstp_transmit_config(sc, bif);
- }
-}
-
-void
-bstp_received_tcn_bpdu(struct bridge_softc *sc, struct bridge_iflist *bif,
- __unused struct bstp_tcn_unit *tcn)
-{
- if (bif->bif_state != BSTP_IFSTATE_DISABLED &&
- bstp_designated_port(sc, bif)) {
- bstp_topology_change_detection(sc);
- bstp_acknowledge_topology_change(sc, bif);
- }
-}
-
-void
-bstp_hello_timer_expiry(struct bridge_softc *sc)
-{
- bstp_config_bpdu_generation(sc);
- bstp_timer_start(&sc->sc_hello_timer, 0);
-}
-
-void
-bstp_message_age_timer_expiry(struct bridge_softc *sc,
- struct bridge_iflist *bif)
-{
- int root;
-
- root = bstp_root_bridge(sc);
- bstp_become_designated_port(sc, bif);
- bstp_configuration_update(sc);
- bstp_port_state_selection(sc);
-
- if ((bstp_root_bridge(sc)) && (root == 0)) {
- sc->sc_max_age = sc->sc_bridge_max_age;
- sc->sc_hello_time = sc->sc_bridge_hello_time;
- sc->sc_forward_delay = sc->sc_bridge_forward_delay;
-
- bstp_topology_change_detection(sc);
- bstp_timer_stop(&sc->sc_tcn_timer);
- bstp_config_bpdu_generation(sc);
- bstp_timer_start(&sc->sc_hello_timer, 0);
- }
-}
-
-void
-bstp_forward_delay_timer_expiry(struct bridge_softc *sc,
- struct bridge_iflist *bif)
-{
- if (bif->bif_state == BSTP_IFSTATE_LISTENING) {
- bstp_set_port_state(bif, BSTP_IFSTATE_LEARNING);
- bstp_timer_start(&bif->bif_forward_delay_timer, 0);
- } else if (bif->bif_state == BSTP_IFSTATE_LEARNING) {
- bstp_set_port_state(bif, BSTP_IFSTATE_FORWARDING);
- if (bstp_designated_for_some_port(sc) &&
- bif->bif_change_detection_enabled)
- bstp_topology_change_detection(sc);
- }
-}
-
-int
-bstp_designated_for_some_port(struct bridge_softc *sc)
-{
-
- struct bridge_iflist *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bif->bif_designated_bridge == sc->sc_bridge_id)
- return (1);
- }
- return (0);
-}
-
-void
-bstp_tcn_timer_expiry(struct bridge_softc *sc)
-{
- bstp_transmit_tcn(sc);
- bstp_timer_start(&sc->sc_tcn_timer, 0);
-}
-
-void
-bstp_topology_change_timer_expiry(struct bridge_softc *sc)
-{
- sc->sc_topology_change_detected = 0;
- sc->sc_topology_change = 0;
-}
-
-void
-bstp_hold_timer_expiry(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- if (bif->bif_config_pending)
- bstp_transmit_config(sc, bif);
-}
-
-__private_extern__ void
-bstp_initialization(struct bridge_softc *sc)
-{
- struct bridge_iflist *bif, *mif;
- struct timespec ts;
- unsigned char *lladdr;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
- mif = NULL;
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bif->bif_ifp->if_type != IFT_ETHER)
- continue;
- bif->bif_port_id = (bif->bif_priority << 8) |
- (bif->bif_ifp->if_index & 0xff);
-
- if (mif == NULL) {
- mif = bif;
- continue;
- }
- if (memcmp(ifnet_lladdr(bif->bif_ifp),
- ifnet_lladdr(mif->bif_ifp), ETHER_ADDR_LEN) < 0) {
- mif = bif;
- continue;
- }
- }
- if (mif == NULL) {
- bstp_stop(sc);
- return;
- }
-
- lladdr = ifnet_lladdr(mif->bif_ifp);
- sc->sc_bridge_id =
- (((uint64_t)sc->sc_bridge_priority) << 48) |
- (((uint64_t)lladdr[0]) << 40) |
- (((uint64_t)lladdr[1]) << 32) |
- (lladdr[2] << 24) |
- (lladdr[3] << 16) |
- (lladdr[4] << 8) |
- (lladdr[5]);
-
- sc->sc_designated_root = sc->sc_bridge_id;
- sc->sc_root_path_cost = 0;
- sc->sc_root_port = NULL;
-
- sc->sc_max_age = sc->sc_bridge_max_age;
- sc->sc_hello_time = sc->sc_bridge_hello_time;
- sc->sc_forward_delay = sc->sc_bridge_forward_delay;
- sc->sc_topology_change_detected = 0;
- sc->sc_topology_change = 0;
- bstp_timer_stop(&sc->sc_tcn_timer);
- bstp_timer_stop(&sc->sc_topology_change_timer);
-
- bsd_untimeout(bstp_tick, sc);
- ts.tv_sec = 1;
- ts.tv_nsec = 0;
- bsd_timeout(bstp_tick, sc, &ts);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if (bif->bif_flags & IFBIF_STP)
- bstp_enable_port(sc, bif);
- else
- bstp_disable_port(sc, bif);
- }
-
- bstp_port_state_selection(sc);
- bstp_config_bpdu_generation(sc);
- bstp_timer_start(&sc->sc_hello_timer, 0);
-}
-
-__private_extern__ void
-bstp_stop(struct bridge_softc *sc)
-{
- struct bridge_iflist *bif;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- bstp_set_port_state(bif, BSTP_IFSTATE_DISABLED);
- bstp_timer_stop(&bif->bif_hold_timer);
- bstp_timer_stop(&bif->bif_message_age_timer);
- bstp_timer_stop(&bif->bif_forward_delay_timer);
- }
-
- bsd_untimeout(bstp_tick, sc);
-
- bstp_timer_stop(&sc->sc_topology_change_timer);
- bstp_timer_stop(&sc->sc_tcn_timer);
- bstp_timer_stop(&sc->sc_hello_timer);
-
-}
-
-void
-bstp_initialize_port(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- bstp_become_designated_port(sc, bif);
- bstp_set_port_state(bif, BSTP_IFSTATE_BLOCKING);
- bif->bif_topology_change_acknowledge = 0;
- bif->bif_config_pending = 0;
- bif->bif_change_detection_enabled = 1;
- bstp_timer_stop(&bif->bif_message_age_timer);
- bstp_timer_stop(&bif->bif_forward_delay_timer);
- bstp_timer_stop(&bif->bif_hold_timer);
-}
-
-void
-bstp_enable_port(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- bstp_initialize_port(sc, bif);
- bstp_port_state_selection(sc);
-}
-
-void
-bstp_disable_port(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- int root;
-
- root = bstp_root_bridge(sc);
- bstp_become_designated_port(sc, bif);
- bstp_set_port_state(bif, BSTP_IFSTATE_DISABLED);
- bif->bif_topology_change_acknowledge = 0;
- bif->bif_config_pending = 0;
- bstp_timer_stop(&bif->bif_message_age_timer);
- bstp_timer_stop(&bif->bif_forward_delay_timer);
- bstp_configuration_update(sc);
- bstp_port_state_selection(sc);
-
- if (bstp_root_bridge(sc) && (root == 0)) {
- sc->sc_max_age = sc->sc_bridge_max_age;
- sc->sc_hello_time = sc->sc_bridge_hello_time;
- sc->sc_forward_delay = sc->sc_bridge_forward_delay;
-
- bstp_topology_change_detection(sc);
- bstp_timer_stop(&sc->sc_tcn_timer);
- bstp_config_bpdu_generation(sc);
- bstp_timer_start(&sc->sc_hello_timer, 0);
- }
-}
-
-void
-bstp_set_bridge_priority(struct bridge_softc *sc, uint64_t new_bridge_id)
-{
- struct bridge_iflist *bif;
- int root;
-
- root = bstp_root_bridge(sc);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_designated_port(sc, bif))
- bif->bif_designated_bridge = new_bridge_id;
- }
-
- sc->sc_bridge_id = new_bridge_id;
-
- bstp_configuration_update(sc);
- bstp_port_state_selection(sc);
-
- if (bstp_root_bridge(sc) && (root == 0)) {
- sc->sc_max_age = sc->sc_bridge_max_age;
- sc->sc_hello_time = sc->sc_bridge_hello_time;
- sc->sc_forward_delay = sc->sc_bridge_forward_delay;
-
- bstp_topology_change_detection(sc);
- bstp_timer_stop(&sc->sc_tcn_timer);
- bstp_config_bpdu_generation(sc);
- bstp_timer_start(&sc->sc_hello_timer, 0);
- }
-}
-
-void
-bstp_set_port_priority(struct bridge_softc *sc, struct bridge_iflist *bif,
- uint16_t new_port_id)
-{
- if (bstp_designated_port(sc, bif))
- bif->bif_designated_port = new_port_id;
-
- bif->bif_port_id = new_port_id;
-
- if ((sc->sc_bridge_id == bif->bif_designated_bridge) &&
- (bif->bif_port_id < bif->bif_designated_port)) {
- bstp_become_designated_port(sc, bif);
- bstp_port_state_selection(sc);
- }
-}
-
-void
-bstp_set_path_cost(struct bridge_softc *sc, struct bridge_iflist *bif,
- uint32_t path_cost)
-{
- bif->bif_path_cost = path_cost;
- bstp_configuration_update(sc);
- bstp_port_state_selection(sc);
-}
-
-void
-bstp_enable_change_detection(struct bridge_iflist *bif)
-{
- bif->bif_change_detection_enabled = 1;
-}
-
-void
-bstp_disable_change_detection(struct bridge_iflist *bif)
-{
- bif->bif_change_detection_enabled = 0;
-}
-
-void
-bstp_ifupdstatus(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- struct ifnet *ifp = bif->bif_ifp;
- struct ifmediareq ifmr;
-
- if ((ifnet_flags(ifp) & IFF_UP)) {
- bzero(&ifmr, sizeof(ifmr));
- if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
- // enable the port when the link is up, or its state is unknown
- if ((ifmr.ifm_status & IFM_ACTIVE) || !(ifmr.ifm_status & IFM_AVALID)) {
- if (bif->bif_state == BSTP_IFSTATE_DISABLED)
- bstp_enable_port(sc, bif);
- } else {
- if (bif->bif_state != BSTP_IFSTATE_DISABLED)
- bstp_disable_port(sc, bif);
- }
- }
- return;
- }
-
- if (bif->bif_state != BSTP_IFSTATE_DISABLED)
- bstp_disable_port(sc, bif);
-}
-
-void
-bstp_tick(void *arg)
-{
- struct bridge_softc *sc = arg;
- struct bridge_iflist *bif;
- struct timespec ts;
-
- lck_mtx_lock(sc->sc_mtx);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- /*
- * XXX This can cause a lag in "link does away"
- * XXX and "spanning tree gets updated". We need
- * XXX come sort of callback from the link state
- * XXX update code to kick spanning tree.
- * XXX --thorpej@NetBSD.org
- */
- bstp_ifupdstatus(sc, bif);
- }
-
- if (bstp_timer_expired(&sc->sc_hello_timer, sc->sc_hello_time))
- bstp_hello_timer_expiry(sc);
-
- if (bstp_timer_expired(&sc->sc_tcn_timer, sc->sc_bridge_hello_time))
- bstp_tcn_timer_expiry(sc);
-
- if (bstp_timer_expired(&sc->sc_topology_change_timer,
- sc->sc_topology_change_time))
- bstp_topology_change_timer_expiry(sc);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_timer_expired(&bif->bif_message_age_timer,
- sc->sc_max_age))
- bstp_message_age_timer_expiry(sc, bif);
- }
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if ((bif->bif_flags & IFBIF_STP) == 0)
- continue;
- if (bstp_timer_expired(&bif->bif_forward_delay_timer,
- sc->sc_forward_delay))
- bstp_forward_delay_timer_expiry(sc, bif);
-
- if (bstp_timer_expired(&bif->bif_hold_timer,
- sc->sc_hold_time))
- bstp_hold_timer_expiry(sc, bif);
- }
-
- lck_mtx_unlock(sc->sc_mtx);
-
- /* APPLE MODIFICATION - bridge changes */
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING) {
- ts.tv_sec = 1;
- ts.tv_nsec = 0;
- bsd_timeout(bstp_tick, sc, &ts);
- }
-}
-
-void
-bstp_timer_start(struct bridge_timer *t, uint16_t v)
-{
- t->value = v;
- t->active = 1;
-}
-
-void
-bstp_timer_stop(struct bridge_timer *t)
-{
- t->value = 0;
- t->active = 0;
-}
-
-int
-bstp_timer_expired(struct bridge_timer *t, uint16_t v)
-{
- if (t->active == 0)
- return (0);
- t->value += BSTP_TICK_VAL;
- if (t->value >= v) {
- bstp_timer_stop(t);
- return (1);
- }
- return (0);
-
-}
#define DLIL_PRINTF kprintf
#endif
+#define atomic_add_32(a, n) \
+ ((void) OSAddAtomic(n, (volatile SInt32 *)a))
+
+#if PKT_PRIORITY
+#define _CASSERT(x) \
+ switch (0) { case 0: case (x): ; }
+
+#define IF_DATA_REQUIRE_ALIGNED_32(f) \
+ _CASSERT(!(offsetof(struct if_data_internal, f) % sizeof (u_int32_t)))
+
+#define IFNET_IF_DATA_REQUIRE_ALIGNED_32(f) \
+ _CASSERT(!(offsetof(struct ifnet, if_data.f) % sizeof (u_int32_t)))
+#endif /* PKT_PRIORITY */
enum {
kProtoKPI_v1 = 1,
u_int32_t dlil_filter_count = 0;
extern u_int32_t ipv4_ll_arp_aware;
+#if IFNET_ROUTE_REFCNT
+/*
+ * Updating this variable should be done by first acquiring the global
+ * radix node head (rnh_lock), in tandem with setting/clearing the
+ * PR_AGGDRAIN for routedomain.
+ */
+u_int32_t ifnet_aggressive_drainers;
+static u_int32_t net_rtref;
+#endif /* IFNET_ROUTE_REFCNT */
+
static struct dlil_threading_info dlil_lo_thread;
__private_extern__ struct dlil_threading_info *dlil_lo_thread_ptr = &dlil_lo_thread;
{
thread_t thread = THREAD_NULL;
+#if PKT_PRIORITY
+ /*
+ * The following fields must be 32-bit aligned for atomic operations.
+ */
+ IF_DATA_REQUIRE_ALIGNED_32(ifi_obgpackets);
+ IF_DATA_REQUIRE_ALIGNED_32(ifi_obgbytes)
+
+ IFNET_IF_DATA_REQUIRE_ALIGNED_32(ifi_obgpackets);
+ IFNET_IF_DATA_REQUIRE_ALIGNED_32(ifi_obgbytes)
+#endif /* PKT_PRIORITY */
+
PE_parse_boot_argn("net_affinity", &net_affinity, sizeof (net_affinity));
-
+#if IFNET_ROUTE_REFCNT
+ PE_parse_boot_argn("net_rtref", &net_rtref, sizeof (net_rtref));
+#endif /* IFNET_ROUTE_REFCNT */
+
TAILQ_INIT(&dlil_ifnet_head);
TAILQ_INIT(&ifnet_head);
}
else {
KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
+#if PKT_PRIORITY
+ if (mbuf_get_priority(m) == MBUF_PRIORITY_BACKGROUND) {
+ atomic_add_32(&ifp->if_obgpackets, 1);
+ atomic_add_32(&ifp->if_obgbytes,
+ m->m_pkthdr.len);
+ }
+#endif /* PKT_PRIORITY */
retval = ifp->if_output(ifp, m);
if (retval && dlil_verbose) {
printf("dlil_output: output error on %s%d retval = %d\n",
if (send_head) {
KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
+#if PKT_PRIORITY
+ if (mbuf_get_priority(send_head) == MBUF_PRIORITY_BACKGROUND) {
+ atomic_add_32(&ifp->if_obgpackets, 1);
+ atomic_add_32(&ifp->if_obgbytes,
+ send_head->m_pkthdr.len);
+ }
+#endif /* PKT_PRIORITY */
retval = ifp->if_output(ifp, send_head);
if (retval && dlil_verbose) {
printf("dlil_output: output error on %s%d retval = %d\n",
#endif /* PF */
dlil_write_end();
+#if IFNET_ROUTE_REFCNT
+ if (net_rtref) {
+ (void) ifnet_set_idle_flags(ifp, IFRF_IDLE_NOTIFY,
+ IFRF_IDLE_NOTIFY);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+
dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, NULL, 0);
return 0;
/* Let BPF know we're detaching */
bpfdetach(ifp);
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Check to see if this interface has previously triggered
+ * aggressive protocol draining; if so, decrement the global
+ * refcnt and clear PR_AGGDRAIN on the route domain if
+ * there are no more of such an interface around.
+ */
+ if (ifp->if_want_aggressive_drain != 0)
+ (void) ifnet_set_idle_flags(ifp, 0, ~0);
+#endif /* IFNET_ROUTE_REFCNT */
+
if ((retval = dlil_write_begin()) != 0) {
if (retval == EDEADLK) {
retval = 0;
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
void dlil_if_release(struct ifnet *ifp);
+#if IFNET_ROUTE_REFCNT
+extern u_int32_t ifnet_aggressive_drainers;
+#endif /* IFNET_ROUTE_REFCNT */
+
#endif /* KERNEL_PRIVATE */
#endif /* KERNEL */
#endif /* DLIL_H */
#include <sys/socketvar.h>
#include <net/if_vlan_var.h>
#include <net/if_bond_var.h>
-#if IF_BRIDGE
-#include <net/if_bridgevar.h>
-#endif
#include <net/dlil.h>
#if BOND
bond_family_init();
#endif /* BOND */
-#if IF_BRIDGE
- bridgeattach(0);
-#endif
done:
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
static int if_rtmtu(struct radix_node *, void *);
static void if_rtmtu_update(struct ifnet *);
-static struct if_clone *if_clone_lookup(const char *, int *);
#if IF_CLONE_LIST
static int if_clone_list(int count, int * total, user_addr_t dst);
#endif /* IF_CLONE_LIST */
extern void nd6_setmtu(struct ifnet *);
#endif
-#define M_CLONE M_IFADDR
/*
* Network interface utility routines.
* Create a clone network interface.
*/
static int
-if_clone_create(char *name, int len)
+if_clone_create(char *name, int len, void *params)
{
struct if_clone *ifc;
char *dp;
- int wildcard, bytoff, bitoff;
- int unit;
+ int wildcard;
+ u_int32_t bytoff, bitoff;
+ u_int32_t unit;
int err;
ifc = if_clone_lookup(name, &unit);
return (EEXIST);
bytoff = bitoff = 0;
- wildcard = (unit < 0);
+ wildcard = (unit == UINT32_MAX);
/*
* Find a free unit if none was given.
*/
if (unit > ifc->ifc_maxunit)
return (ENXIO);
- err = (*ifc->ifc_create)(ifc, unit);
+ err = (*ifc->ifc_create)(ifc, unit, params);
if (err != 0)
return (err);
struct if_clone *ifc;
struct ifnet *ifp;
int bytoff, bitoff;
- int unit;
+ u_int32_t unit;
ifc = if_clone_lookup(name, &unit);
if (ifc == NULL)
* Look up a network interface cloner.
*/
-static struct if_clone *
-if_clone_lookup(const char *name, int *unitp)
+__private_extern__ struct if_clone *
+if_clone_lookup(const char *name, u_int32_t *unitp)
{
struct if_clone *ifc;
const char *cp;
found_name:
if (*cp == '\0') {
- i = -1;
+ i = 0xffff;
} else {
for (i = 0; *cp != '\0'; cp++) {
if (*cp < '0' || *cp > '9') {
int bytoff, bitoff;
int err;
int len, maxclone;
- int unit;
+ u_int32_t unit;
KASSERT(ifc->ifc_minifs - 1 <= ifc->ifc_maxunit,
("%s: %s requested more units then allowed (%d > %d)",
if_cloners_count++;
for (unit = 0; unit < ifc->ifc_minifs; unit++) {
- err = (*ifc->ifc_create)(ifc, unit);
+ err = (*ifc->ifc_create)(ifc, unit, NULL);
KASSERT(err == 0,
("%s: failed to create required interface %s%d",
__func__, ifc->ifc_name, unit));
ifr = (struct ifreq *)data;
switch (cmd) {
case SIOCIFCREATE:
+ case SIOCIFCREATE2:
+ error = proc_suser(p);
+ if (error)
+ return (error);
+ return if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
+ cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL);
case SIOCIFDESTROY:
error = proc_suser(p);
if (error)
return (error);
- return ((cmd == SIOCIFCREATE) ?
- if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name)) :
- if_clone_destroy(ifr->ifr_name));
+ return if_clone_destroy(ifr->ifr_name);
#if IF_CLONE_LIST
case SIOCIFGCLONERS32: {
struct if_clonereq32 *ifcr = (struct if_clonereq32 *)data;
ifr->ifr_wake_flags = ifnet_get_wake_flags(ifp);
ifnet_lock_done(ifp);
break;
-
+
+ case SIOCGIFGETRTREFCNT:
+#if IFNET_ROUTE_REFCNT
+ ifnet_lock_shared(ifp);
+ ifr->ifr_route_refcnt = ifp->if_route_refcnt;
+ ifnet_lock_done(ifp);
+ break;
+#else
+ return (EOPNOTSUPP);
+#endif /* IFNET_ROUTE_REFCNT */
+
default:
oif_flags = ifp->if_flags;
if (so->so_proto == 0)
#define KEV_DL_PROTO_DETACHED 15
#define KEV_DL_LINK_ADDRESS_CHANGED 16
#define KEV_DL_WAKEFLAGS_CHANGED 17
+#define KEV_DL_IF_IDLE_ROUTE_REFCNT 18
#include <net/if_var.h>
#include <sys/types.h>
#define IFEF_INUSE 0x40000000 /* DLIL ifnet recycler, ifnet in use */
#define IFEF_UPDOWNCHANGE 0x80000000 /* Interface's up/down state is changing */
+/*
+ * !!! NOTE !!!
+ *
+ * if_idle_flags definitions: (all bits are reserved for internal/future
+ * use). Setting these flags MUST be done via the ifnet_set_idle_flags()
+ * KPI due to the associated reference counting. Clearing them may be done by
+ * calling the KPI, otherwise implicitly at interface detach time. Setting
+ * the if_idle_flags field to a non-zero value will cause the networking
+ * stack to aggressively purge expired objects (routes, etc.)
+ */
+#define IFRF_IDLE_NOTIFY 0x1 /* Generate notifications on idle */
+
/* flags set internally only: */
#define IFF_CANTCHANGE \
(IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\
struct ifdevmtu ifru_devmtu;
struct ifkpi ifru_kpi;
u_int32_t ifru_wake_flags;
+ u_int32_t ifru_route_refcnt;
} ifr_ifru;
#define ifr_addr ifr_ifru.ifru_addr /* address */
#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */
#endif /* KERNEL_PRIVATE */
#define ifr_kpi ifr_ifru.ifru_kpi
#define ifr_wake_flags ifr_ifru.ifru_wake_flags /* wake capabilities of devive */
+#define ifr_route_refcnt ifr_ifru.ifru_route_refcnt /* route references on interface */
};
#define _SIZEOF_ADDR_IFREQ(ifr) \
/*
- * Copyright (c) 2004-2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2010 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/**
** bond interface/dlil specific routines
**/
-static int bond_clone_create(struct if_clone *, int);
-static void bond_clone_destroy(struct ifnet *);
+static int bond_clone_create(struct if_clone *, u_int32_t, void *);
+static int bond_clone_destroy(struct ifnet *);
static int bond_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t m,
char *frame_header);
static int bond_output(struct ifnet *ifp, struct mbuf *m);
}
static int
-bond_clone_create(struct if_clone * ifc, int unit)
+bond_clone_create(struct if_clone * ifc, u_int32_t unit, __unused void *params)
{
int error;
ifbond_ref ifb;
return;
}
-static void
+static int
bond_clone_destroy(struct ifnet * ifp)
{
ifbond_ref ifb;
ifb = ifnet_softc(ifp);
if (ifb == NULL || ifnet_type(ifp) != IFT_IEEE8023ADLAG) {
bond_unlock();
- return;
+ return 0;
}
if (ifbond_flags_if_detaching(ifb)) {
bond_unlock();
- return;
+ return 0;
}
bond_remove(ifb);
bond_unlock();
bond_if_detach(ifp);
- return;
+ return 0;
}
static int
+++ /dev/null
-/*
- * Copyright (c) 2004-2009 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-/* $apfw: Revision 1.19 2008/10/24 02:34:06 cbzimmer Exp $ */
-/* $NetBSD: if_bridge.c,v 1.46 2006/11/23 04:07:07 rpaulo Exp $ */
-
-/*
- * Copyright 2001 Wasabi Systems, Inc.
- * All rights reserved.
- *
- * Written by Jason R. Thorpe for Wasabi Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed for the NetBSD Project by
- * Wasabi Systems, Inc.
- * 4. The name of Wasabi Systems, Inc. may not be used to endorse
- * or promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Jason L. Wright
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
- */
-
-/*
- * Network interface bridge support.
- *
- * TODO:
- *
- * - Currently only supports Ethernet-like interfaces (Ethernet,
- * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
- * to bridge other types of interfaces (FDDI-FDDI, and maybe
- * consider heterogenous bridges).
- */
-
-#include <sys/cdefs.h>
-//_KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.46 2006/11/23 04:07:07 rpaulo Exp $");
-
-//#include "opt_bridge_ipf.h"
-//#include "opt_inet.h"
-//#include "opt_pfil_hooks.h"
-//#include "opt_wlan.h" /* APPLE MODIFICATION <cbz@apple.com> - Proxy STA support */
-//#include "bpfilter.h"
-//#include "gif.h" // APPLE MODIFICATION - add gif support
-
-#define BRIDGE_DEBUG 0
-
-#include <sys/param.h>
-#include <sys/kernel.h>
-#include <sys/mbuf.h>
-#include <sys/queue.h>
-#include <sys/socket.h>
-#include <sys/sockio.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-//#include <sys/pool.h>
-#include <sys/kauth.h>
-#include <sys/random.h>
-#include <sys/kern_event.h>
-#include <sys/systm.h>
-#include <sys/sysctl.h>
-
-#include <libkern/libkern.h>
-
-#include <kern/zalloc.h>
-
-#if NBPFILTER > 0
-#include <net/bpf.h>
-#endif
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/if_types.h>
-#include <net/if_llc.h>
-
-#include <net/if_ether.h>
-#include <net/if_bridgevar.h>
-#include <net/dlil.h>
-
-#include <net/kpi_interfacefilter.h>
-
-#include <netinet/in.h>
-#include <netinet/in_systm.h>
-#include <netinet/ip.h>
-#include <netinet/ip_var.h>
-#ifdef INET6
-#include <netinet/ip6.h>
-#include <netinet6/in6_var.h>
-#include <netinet6/ip6_var.h>
-#endif
-
-#if BRIDGE_DEBUG
-#define static __private_extern__
-#endif
-
-extern void dlil_input_packet_list(struct ifnet *, struct mbuf *);
-
-/*
- * Size of the route hash table. Must be a power of two.
- */
-/* APPLE MODIFICATION - per Wasabi performance improvement, change the hash table size */
-#if 0
-#ifndef BRIDGE_RTHASH_SIZE
-#define BRIDGE_RTHASH_SIZE 1024
-#endif
-#else
-#ifndef BRIDGE_RTHASH_SIZE
-#define BRIDGE_RTHASH_SIZE 256
-#endif
-#endif
-
-/* APPLE MODIFICATION - support for HW checksums */
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
-#include <netinet/udp.h>
-#include <netinet/tcp.h>
-#endif
-
-#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
-
-//#include "carp.h"
-#if NCARP > 0
-#include <netinet/in.h>
-#include <netinet/in_var.h>
-#include <netinet/ip_carp.h>
-#endif
-
-/*
- * Maximum number of addresses to cache.
- */
-#ifndef BRIDGE_RTABLE_MAX
-#define BRIDGE_RTABLE_MAX 100
-#endif
-
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-/*
- * Maximum (additional to maxcache) number of proxysta addresses to cache.
- */
-#ifndef BRIDGE_RTABLE_MAX_PROXYSTA
-#define BRIDGE_RTABLE_MAX_PROXYSTA 16
-#endif
-#endif
-
-/*
- * Spanning tree defaults.
- */
-#define BSTP_DEFAULT_MAX_AGE (20 * 256)
-#define BSTP_DEFAULT_HELLO_TIME (2 * 256)
-#define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
-#define BSTP_DEFAULT_HOLD_TIME (1 * 256)
-#define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
-#define BSTP_DEFAULT_PORT_PRIORITY 0x80
-#define BSTP_DEFAULT_PATH_COST 55
-
-/*
- * Timeout (in seconds) for entries learned dynamically.
- */
-#ifndef BRIDGE_RTABLE_TIMEOUT
-#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
-#endif
-
-/*
- * Number of seconds between walks of the route list.
- */
-#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
-#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
-#endif
-
-/*
- * List of capabilities to mask on the member interface.
- */
-#define BRIDGE_IFCAPS_MASK \
- (IFCAP_CSUM_IPv4_Tx | \
- IFCAP_CSUM_TCPv4_Tx | \
- IFCAP_CSUM_UDPv4_Tx | \
- IFCAP_CSUM_TCPv6_Tx | \
- IFCAP_CSUM_UDPv6_Tx)
-
-
-int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
-
-static zone_t bridge_rtnode_pool = NULL;
-
-static errno_t
-bridge_iff_input(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
- mbuf_t *data, char **frame_ptr);
-static void
-bridge_iff_event(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
- const struct kev_msg *event_msg);
-static void
-bridge_iff_detached(void* cookie, __unused ifnet_t interface);
-
-static uint32_t
-bridge_rthash(__unused struct bridge_softc *sc, const uint8_t *addr);
-
-static int bridge_clone_create(struct if_clone *, int);
-static void bridge_clone_destroy(struct ifnet *);
-
-static errno_t bridge_ioctl(ifnet_t ifp, unsigned long cmd, void *data);
-#if HAS_IF_CAP
-static void bridge_mutecaps(struct bridge_iflist *, int);
-#endif
-static int bridge_init(struct ifnet *);
-static void bridge_stop(struct ifnet *, int);
-
-#if BRIDGE_MEMBER_OUT_FILTER
-static errno_t
-bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol, mbuf_t *data);
-static int bridge_output(struct bridge_softc *sc, ifnet_t ifp, mbuf_t m);
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
-
-static errno_t bridge_start(struct ifnet *, mbuf_t);
-static errno_t bridge_set_bpf_tap(ifnet_t ifn, bpf_tap_mode mode, bpf_packet_func bpf_callback);
-__private_extern__ errno_t bridge_bpf_input(ifnet_t ifp, struct mbuf *m);
-__private_extern__ errno_t bridge_bpf_output(ifnet_t ifp, struct mbuf *m);
-
-static void bridge_detach(ifnet_t ifp);
-
-static errno_t bridge_input(struct bridge_iflist *, struct ifnet *, struct mbuf *, void *frame_header);
-
-static void bridge_forward(struct bridge_softc *, struct mbuf *m);
-
-static void bridge_timer(void *);
-
-static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
- struct mbuf *, int);
-
-static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
- struct ifnet *, int, uint8_t);
-static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
-static void bridge_rttrim(struct bridge_softc *);
-static void bridge_rtage(struct bridge_softc *);
-static void bridge_rtflush(struct bridge_softc *, int);
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-static void bridge_rtdiscovery(struct bridge_softc *);
-static void bridge_rtpurge(struct bridge_softc *, struct ifnet *);
-#endif
-static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
-
-static int bridge_rtable_init(struct bridge_softc *);
-static void bridge_rtable_fini(struct bridge_softc *);
-
-static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
- const uint8_t *);
-static int bridge_rtnode_insert(struct bridge_softc *,
- struct bridge_rtnode *);
-static void bridge_rtnode_destroy(struct bridge_softc *,
- struct bridge_rtnode *);
-
-static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
- const char *name);
-static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
- struct ifnet *ifp);
-static void bridge_delete_member(struct bridge_softc *,
- struct bridge_iflist *);
-
-static void bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp);
-
-
-static int bridge_ioctl_add(struct bridge_softc *, void *);
-static int bridge_ioctl_del(struct bridge_softc *, void *);
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-static int bridge_ioctl_purge(struct bridge_softc *sc, void *arg);
-#endif
-static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
-static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
-static int bridge_ioctl_scache(struct bridge_softc *, void *);
-static int bridge_ioctl_gcache(struct bridge_softc *, void *);
-static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
-static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
-static int bridge_ioctl_rts32(struct bridge_softc *, void *);
-static int bridge_ioctl_rts64(struct bridge_softc *, void *);
-static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
-static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
-static int bridge_ioctl_sto(struct bridge_softc *, void *);
-static int bridge_ioctl_gto(struct bridge_softc *, void *);
-static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
-static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
-static int bridge_ioctl_flush(struct bridge_softc *, void *);
-static int bridge_ioctl_gpri(struct bridge_softc *, void *);
-static int bridge_ioctl_spri(struct bridge_softc *, void *);
-static int bridge_ioctl_ght(struct bridge_softc *, void *);
-static int bridge_ioctl_sht(struct bridge_softc *, void *);
-static int bridge_ioctl_gfd(struct bridge_softc *, void *);
-static int bridge_ioctl_sfd(struct bridge_softc *, void *);
-static int bridge_ioctl_gma(struct bridge_softc *, void *);
-static int bridge_ioctl_sma(struct bridge_softc *, void *);
-static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
-static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
-
-struct bridge_control {
- int (*bc_func)(struct bridge_softc *, void *);
- unsigned int bc_argsize;
- unsigned int bc_flags;
-};
-
-#define BC_F_COPYIN 0x01 /* copy arguments in */
-#define BC_F_COPYOUT 0x02 /* copy arguments out */
-#define BC_F_SUSER 0x04 /* do super-user check */
-
-static const struct bridge_control bridge_control_table32[] = {
- { bridge_ioctl_add, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_del, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gifflags, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_sifflags, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_scache, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gcache, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
-
- { bridge_ioctl_gifs32, sizeof(struct ifbifconf32),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_rts32, sizeof(struct ifbaconf32),
- BC_F_COPYIN|BC_F_COPYOUT },
-
- { bridge_ioctl_saddr32, sizeof(struct ifbareq32),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sto, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gto, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
-
- { bridge_ioctl_daddr32, sizeof(struct ifbareq32),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_flush, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gpri, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_spri, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_ght, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sht, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gfd, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfd, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gma, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sma, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sifprio, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sifcost, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- { bridge_ioctl_purge, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-#endif
-};
-
-static const struct bridge_control bridge_control_table64[] = {
- { bridge_ioctl_add, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_del, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gifflags, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_sifflags, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_scache, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gcache, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
-
- { bridge_ioctl_gifs64, sizeof(struct ifbifconf64),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_rts64, sizeof(struct ifbaconf64),
- BC_F_COPYIN|BC_F_COPYOUT },
-
- { bridge_ioctl_saddr64, sizeof(struct ifbareq64),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sto, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gto, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
-
- { bridge_ioctl_daddr64, sizeof(struct ifbareq64),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_flush, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gpri, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_spri, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_ght, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sht, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gfd, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfd, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_gma, sizeof(struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sma, sizeof(struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sifprio, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- { bridge_ioctl_sifcost, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- { bridge_ioctl_purge, sizeof(struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
-#endif
-};
-
-static const unsigned int bridge_control_table_size =
-sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]);
-
-static LIST_HEAD(, bridge_softc) bridge_list = LIST_HEAD_INITIALIZER(bridge_list);
-
-static lck_grp_t *bridge_lock_grp = NULL;
-static lck_attr_t *bridge_lock_attr = NULL;
-
-static lck_rw_t *bridge_list_lock = NULL;
-
-
-static struct if_clone bridge_cloner =
- IF_CLONE_INITIALIZER("bridge",
- bridge_clone_create,
- bridge_clone_destroy,
- 0,
- IF_MAXUNIT);
-
-#if BRIDGE_DEBUG
-
-SYSCTL_DECL(_net_link);
-
-SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Bridge");
-
-__private_extern__ int _if_brige_debug = 0;
-
-SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW,
- &_if_brige_debug, 0, "Bridge debug");
-
-static void printf_ether_header(struct ether_header *eh);
-static void printf_mbuf_data(mbuf_t m, size_t offset, size_t len);
-static void printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix);
-static void printf_mbuf(mbuf_t m, const char *prefix, const char *suffix);
-static void link_print(struct sockaddr_dl * dl_p);
-
-void
-printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
-{
- if (m)
- printf("%spktlen: %u rcvif: %p header: %p nextpkt: %p%s",
- prefix ? prefix : "",
- (unsigned int)mbuf_pkthdr_len(m), mbuf_pkthdr_rcvif(m), mbuf_pkthdr_header(m), mbuf_nextpkt(m),
- suffix ? suffix : "");
- else
- printf("%s<NULL>%s\n", prefix, suffix);
-}
-
-void
-printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
-{
- if (m) {
- printf("%s%p type: %u flags: 0x%x len: %u data: %p maxlen: %u datastart: %p next: %p%s",
- prefix ? prefix : "",
- m, mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m), mbuf_data(m),
- (unsigned int)mbuf_maxlen(m), mbuf_datastart(m), mbuf_next(m),
- !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
- if ((mbuf_flags(m) & MBUF_PKTHDR))
- printf_mbuf_pkthdr(m, " ", suffix);
- } else
- printf("%s<NULL>%s\n", prefix, suffix);
-}
-
-void
-printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
-{
- mbuf_t n;
- size_t i, j;
- size_t pktlen, mlen, maxlen;
- unsigned char *ptr;
-
- pktlen = mbuf_pkthdr_len(m);
-
- if (offset > pktlen)
- return;
-
- maxlen = (pktlen - offset > len) ? len : pktlen;
- n = m;
- mlen = mbuf_len(n);
- ptr = mbuf_data(n);
- for (i = 0, j = 0; i < maxlen; i++, j++) {
- if (j >= mlen) {
- n = mbuf_next(n);
- if (n == 0)
- break;
- ptr = mbuf_data(n);
- mlen = mbuf_len(n);
- j = 0;
- }
- if (i >= offset) {
- printf("%02x%s", ptr[j], i % 2 ? " " : "");
- }
- }
- return;
-}
-
-static void
-printf_ether_header(struct ether_header *eh)
-{
- printf("%02x:%02x:%02x:%02x:%02x:%02x > %02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
- eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
- eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
- eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
- eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
- eh->ether_type);
-}
-#endif /* BRIDGE_DEBUG */
-
-/*
- * bridgeattach:
- *
- * Pseudo-device attach routine.
- */
-__private_extern__ int
-bridgeattach(__unused int n)
-{
- int error;
- lck_grp_attr_t *lck_grp_attr = NULL;
-
- bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode), 1024 * sizeof(struct bridge_rtnode),
- 0, "bridge_rtnode");
-
- lck_grp_attr = lck_grp_attr_alloc_init();
-
- bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);
-
- bridge_lock_attr = lck_attr_alloc_init();
-
-#if BRIDGE_DEBUG
- lck_attr_setdebug(bridge_lock_attr);
-#endif
-
- bridge_list_lock = lck_rw_alloc_init(bridge_lock_grp, bridge_lock_attr);
-
- // can free the attributes once we've allocated the group lock
- lck_grp_attr_free(lck_grp_attr);
-
- LIST_INIT(&bridge_list);
- error = if_clone_attach(&bridge_cloner);
-
- return error;
-}
-
-#if BRIDGE_DEBUG
-
-static void
-link_print(struct sockaddr_dl * dl_p)
-{
- int i;
-
-#if 1
- printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
- " slen %d addr ", dl_p->sdl_len,
- dl_p->sdl_index, dl_p->sdl_family, dl_p->sdl_type,
- dl_p->sdl_nlen, dl_p->sdl_alen, dl_p->sdl_slen);
-#endif
- for (i = 0; i < dl_p->sdl_alen; i++)
- printf("%s%x", i ? ":" : "",
- (CONST_LLADDR(dl_p))[i]);
- printf("\n");
- return;
-}
-#endif /* BRIDGE_DEBUG */
-
-
-/*
- * bridge_clone_create:
- *
- * Create a new bridge instance.
- */
-/* APPLE MODIFICATION <cbz@apple.com> - add opaque <const caddr_t params> argument for cloning. This is done for
- net80211's VAP creation (with the Marvell codebase). I think this could end up being useful
- for other devices, too. This is not in an ifdef because it doesn't hurt anything to have
- this extra param */
-static int
-bridge_clone_create(struct if_clone *ifc, int unit)
-{
- struct bridge_softc *sc = NULL;
- struct ifnet *ifp = NULL;
- u_char eaddr[6];
- uint32_t r;
- struct ifnet_init_params init_params;
- errno_t error = 0;
- uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) + IFNAMSIZ + ETHER_ADDR_LEN];
- struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;
-
- sc = _MALLOC(sizeof(*sc), M_DEVBUF, M_WAITOK);
- memset(sc, 0, sizeof(*sc));
-
- sc->sc_brtmax = BRIDGE_RTABLE_MAX;
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- sc->sc_brtmax_proxysta = BRIDGE_RTABLE_MAX_PROXYSTA;
-#endif
- sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
- sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
- sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
- sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
- sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
- sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
- sc->sc_filter_flags = IFBF_FILT_DEFAULT;
-#ifndef BRIDGE_IPF
- /*
- * For backwards compatibility with previous behaviour...
- * Switch off filtering on the bridge itself if BRIDGE_IPF is
- * not defined.
- */
- sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
-#endif
-
- /* Initialize our routing table. */
- error = bridge_rtable_init(sc);
- if (error != 0) {
- printf("bridge_clone_create: bridge_rtable_init failed %d\n", error);
- goto done;
- }
-
- LIST_INIT(&sc->sc_iflist);
-
- sc->sc_mtx = lck_mtx_alloc_init(bridge_lock_grp, bridge_lock_attr);
-
- /* use the interface name as the unique id for ifp recycle */
- snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d",
- ifc->ifc_name, unit);
- memset(&init_params, 0, sizeof(struct ifnet_init_params));
- init_params.uniqueid = sc->sc_if_xname;
- init_params.uniqueid_len = strlen(sc->sc_if_xname);
- init_params.name = ifc->ifc_name;
- init_params.unit = unit;
- init_params.family = IFNET_FAMILY_ETHERNET;
- init_params.type = IFT_BRIDGE;
- init_params.output = bridge_start;
- init_params.demux = ether_demux;
- init_params.add_proto = ether_add_proto;
- init_params.del_proto = ether_del_proto;
- init_params.check_multi = ether_check_multi;
- init_params.framer = ether_frameout;
- init_params.softc = sc;
- init_params.ioctl = bridge_ioctl;
- init_params.set_bpf_tap = bridge_set_bpf_tap;
- init_params.detach = bridge_detach;
- init_params.broadcast_addr = etherbroadcastaddr;
- init_params.broadcast_len = ETHER_ADDR_LEN;
- error = ifnet_allocate(&init_params, &ifp);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_allocate failed %d\n", error);
- goto done;
- }
- sc->sc_if = ifp;
-
- error = ifnet_set_mtu(ifp, ETHERMTU);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_mtu failed %d\n", error);
- goto done;
- }
- error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_addrlen failed %d\n", error);
- goto done;
- }
- error = ifnet_set_baudrate(ifp, 10000000) ; // XXX: this is what IONetworking does
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_baudrate failed %d\n", error);
- goto done;
- }
- error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_hdrlen failed %d\n", error);
- goto done;
- }
- error = ifnet_set_flags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
- 0xffff);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_flags failed %d\n", error);
- goto done;
- }
-
- /*
- * Generate a random ethernet address and use the private AC:DE:48
- * OUI code.
- */
- read_random(&r, sizeof(r));
- eaddr[0] = 0xAC;
- eaddr[1] = 0xDE;
- eaddr[2] = 0x48;
- eaddr[3] = (r >> 0) & 0xffu;
- eaddr[4] = (r >> 8) & 0xffu;
- eaddr[5] = (r >> 16) & 0xffu;
-
- memset(sdl, 0, sizeof(sdl_buffer));
- sdl->sdl_family = AF_LINK;
- sdl->sdl_nlen = strlen(sc->sc_if_xname);
- sdl->sdl_alen = ETHER_ADDR_LEN;
- sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
- memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
- memcpy(LLADDR(sdl), eaddr, ETHER_ADDR_LEN);
-
-#if BRIDGE_DEBUG
- link_print(sdl);
-#endif
-
- error = ifnet_attach(ifp, NULL);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_attach failed %d\n", error);
- goto done;
- }
-
- error = ifnet_set_lladdr_and_type(ifp, eaddr, ETHER_ADDR_LEN, IFT_ETHER);
- if (error != 0) {
- printf("bridge_clone_create: ifnet_set_lladdr_and_type failed %d\n", error);
- goto done;
- }
-
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
- /*
- * APPLE MODIFICATION - our bridge can support HW checksums
- * (useful if underlying interfaces support them) on TX,
- * RX is not that interesting, since the stack just looks to
- * see if the packet has been checksummed already (I think)
- * but we might as well indicate we support it
- */
- ifp->if_capabilities =
- IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
- IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx ;
-#endif
-
- lck_rw_lock_exclusive(bridge_list_lock);
- LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
- lck_rw_done(bridge_list_lock);
-
- /* attach as ethernet */
- error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header), NULL, NULL);
-
-done:
- if (error != 0) {
- printf("bridge_clone_create failed error %d\n", error);
- /* Cleanup TBD */
- }
-
- return error;
-}
-
-/*
- * bridge_clone_destroy:
- *
- * Destroy a bridge instance.
- */
-static void
-bridge_clone_destroy(struct ifnet *ifp)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
- struct bridge_iflist *bif;
- int error;
-
- lck_mtx_lock(sc->sc_mtx);
- if ((sc->sc_flags & SCF_DETACHING)) {
- lck_mtx_unlock(sc->sc_mtx);
- return;
- }
- sc->sc_flags |= SCF_DETACHING;
-
- bridge_stop(ifp, 1);
-
- error = ifnet_set_flags(ifp, 0, IFF_UP);
- if (error != 0) {
- printf("bridge_clone_destroy: ifnet_set_flags failed %d\n", error);
- }
-
- while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
- bridge_delete_member(sc, bif);
-
- lck_mtx_unlock(sc->sc_mtx);
-
- error = ifnet_detach(ifp);
- if (error != 0) {
- printf("bridge_clone_destroy: ifnet_detach failed %d\n", error);
- if ((sc = (struct bridge_softc *)ifnet_softc(ifp)) != NULL) {
- lck_mtx_lock(sc->sc_mtx);
- sc->sc_flags &= ~SCF_DETACHING;
- lck_mtx_unlock(sc->sc_mtx);
- }
- }
-
- return;
-}
-
-#define DRVSPEC \
- if (ifd->ifd_cmd >= bridge_control_table_size) { \
- error = EINVAL; \
- break; \
- } \
- bc = &bridge_control_table[ifd->ifd_cmd]; \
- \
- if ((cmd & IOC_DIRMASK) == IOC_INOUT && \
- (bc->bc_flags & BC_F_COPYOUT) == 0) { \
- error = EINVAL; \
- break; \
- } \
- else if (((cmd & IOC_DIRMASK) == IOC_IN) && \
- (bc->bc_flags & BC_F_COPYOUT) != 0) { \
- error = EINVAL; \
- break; \
- } \
- \
- if (bc->bc_flags & BC_F_SUSER) { \
- error = kauth_authorize_generic(kauth_cred_get(), KAUTH_GENERIC_ISSUSER); \
- if (error) \
- break; \
- } \
- \
- if (ifd->ifd_len != bc->bc_argsize || \
- ifd->ifd_len > sizeof(args)) { \
- error = EINVAL; \
- break; \
- } \
- \
- memset(&args, 0, sizeof(args)); \
- if (bc->bc_flags & BC_F_COPYIN) { \
- error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
- if (error) \
- break; \
- } \
- \
- lck_mtx_lock(sc->sc_mtx); \
- error = (*bc->bc_func)(sc, &args); \
- lck_mtx_unlock(sc->sc_mtx); \
- if (error) \
- break; \
- \
- if (bc->bc_flags & BC_F_COPYOUT) \
- error = copyout(&args, ifd->ifd_data, ifd->ifd_len)
-
-/*
- * bridge_ioctl:
- *
- * Handle a control request from the operator.
- */
-static errno_t
-bridge_ioctl(ifnet_t ifp, unsigned long cmd, void *data)
-{
- struct bridge_softc *sc = ifnet_softc(ifp);
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
-
-#if BRIDGE_DEBUG
- printf("bridge_ioctl: ifp %p cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
- ifp,
- cmd,
- (cmd & IOC_IN) ? 'I' : ' ',
- (cmd & IOC_OUT) ? 'O' : ' ',
- IOCPARM_LEN(cmd),
- (char)IOCGROUP(cmd),
- cmd & 0xff);
- printf("SIOCGDRVSPEC32 %lx SIOCGDRVSPEC64 %lx\n", SIOCGDRVSPEC32, SIOCGDRVSPEC64);
-#endif
-
- switch (cmd) {
- case SIOCADDMULTI:
- break;
- case SIOCDELMULTI:
- break;
-
- case SIOCSDRVSPEC32:
- case SIOCGDRVSPEC32: {
- union {
- struct ifbreq ifbreq;
- struct ifbifconf32 ifbifconf;
- struct ifbareq32 ifbareq;
- struct ifbaconf32 ifbaconf;
- struct ifbrparam ifbrparam;
- } args;
- struct ifdrv32 *ifd = (struct ifdrv32 *) data;
- const struct bridge_control *bridge_control_table = bridge_control_table32, *bc;
-
- DRVSPEC;
-
- break;
- }
- case SIOCSDRVSPEC64:
- case SIOCGDRVSPEC64: {
- union {
- struct ifbreq ifbreq;
- struct ifbifconf64 ifbifconf;
- struct ifbareq64 ifbareq;
- struct ifbaconf64 ifbaconf;
- struct ifbrparam ifbrparam;
- } args;
- struct ifdrv64 *ifd = (struct ifdrv64 *) data;
- const struct bridge_control *bridge_control_table = bridge_control_table64, *bc;
-
- DRVSPEC;
-
- break;
- }
-
- case SIOCSIFFLAGS:
- if ((ifnet_flags(ifp) & (IFF_UP|IFF_RUNNING)) == IFF_RUNNING) {
- /*
- * If interface is marked down and it is running,
- * then stop and disable it.
- */
- lck_mtx_lock(sc->sc_mtx);
- bridge_stop(ifp, 1);
- lck_mtx_unlock(sc->sc_mtx);
- } else if ((ifnet_flags(ifp) & (IFF_UP|IFF_RUNNING)) == IFF_UP) {
- /*
- * If interface is marked up and it is stopped, then
- * start it.
- */
- lck_mtx_lock(sc->sc_mtx);
- error = bridge_init(ifp);
- lck_mtx_unlock(sc->sc_mtx);
- }
- break;
-
- case SIOCSIFMTU:
-#if 0
- /* APPLE MODIFICATION <cbz@apple.com>
- if we wanted to support changing the MTU */
- {
- struct ifreq *ifr = (struct ifreq *)data;
- struct bridge_iflist *bif;
- struct ifnet *dst_if;
- sc->sc_if.if_mtu = ifr->ifr_mtu;
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- dst_if = bif->bif_ifp;
- error = ifnet_ioctl(dst_if, 0, cmd, data);
- if (error)
- break;
- }
- }
-#else
- /* Do not allow the MTU to be changed on the bridge */
- error = EINVAL;
-#endif
- break;
-
- /* APPLE MODIFICATION - don't pass this down to ether_ioctl, just indicate we don't handle it */
- case SIOCGIFMEDIA:
- error = EINVAL;
- break;
-
- case SIOCSIFLLADDR:
- error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
- if (error != 0)
- printf("bridge_ioctl: ifnet_set_lladdr failed %d\n", error);
- break;
-
- default:
- error = ether_ioctl(ifp, cmd, data);
-#if BRIDGE_DEBUG
- if (error != 0)
- printf("bridge_ioctl: ether_ioctl ifp %p cmd 0x%08lx (%c%c [%lu] %c %lu) failed error: %d\n",
- ifp,
- cmd,
- (cmd & IOC_IN) ? 'I' : ' ',
- (cmd & IOC_OUT) ? 'O' : ' ',
- IOCPARM_LEN(cmd),
- (char) IOCGROUP(cmd),
- cmd & 0xff,
- error);
-#endif /* BRIDGE_DEBUG */
- break;
- }
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
-
- return (error);
-}
-
-/*
- * bridge_mutecaps:
- *
- * Clear or restore unwanted capabilities on the member interface
- */
-#if HAS_IF_CAP
-void
-bridge_mutecaps(struct bridge_iflist *bif, int mute)
-{
- struct ifnet *ifp = bif->bif_ifp;
- struct ifcapreq ifcr;
-
- if (ifp->if_ioctl == NULL)
- return;
-
- memset(&ifcr, 0, sizeof(ifcr));
- ifcr.ifcr_capenable = ifp->if_capenable;
-
- if (mute) {
- /* mask off and save capabilities */
- bif->bif_mutecap = ifcr.ifcr_capenable & BRIDGE_IFCAPS_MASK;
- if (bif->bif_mutecap != 0)
- ifcr.ifcr_capenable &= ~BRIDGE_IFCAPS_MASK;
- } else
- /* restore muted capabilities */
- ifcr.ifcr_capenable |= bif->bif_mutecap;
-
- if (bif->bif_mutecap != 0) {
- (void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifcr);
- }
-}
-#endif /* HAS_IF_CAP */
-
-/*
- * bridge_lookup_member:
- */
-static struct bridge_iflist *
-bridge_lookup_member(struct bridge_softc *sc, const char *name)
-{
- struct bridge_iflist *bif;
- struct ifnet *ifp;
- char if_xname[IFNAMSIZ];
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- ifp = bif->bif_ifp;
- snprintf(if_xname, sizeof(if_xname), "%s%d",
- ifnet_name(ifp), ifnet_unit(ifp));
- if (strncmp(if_xname, name, sizeof(if_xname)) == 0)
- return (bif);
- }
-
- return (NULL);
-}
-
-/*
- * bridge_lookup_member_if:
- */
-static struct bridge_iflist *
-bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
-{
- struct bridge_iflist *bif;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if (bif->bif_ifp == member_ifp)
- return (bif);
- }
-
- return (NULL);
-}
-
-static errno_t
-bridge_iff_input(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
- mbuf_t *data, char **frame_ptr)
-{
- errno_t error = 0;
- struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
- struct bridge_softc *sc = bif->bif_sc;
- int included = 0;
- size_t frmlen = 0;
- mbuf_t m = *data;
-
- if ((m->m_flags & M_PROTO1))
- goto out;
-
- if (*frame_ptr >= (char *)mbuf_datastart(m) && *frame_ptr <= (char *)mbuf_data(m)) {
- included = 1;
- frmlen = (char *)mbuf_data(m) - *frame_ptr;
- }
-#if BRIDGE_DEBUG
- if (_if_brige_debug) {
- printf("bridge_iff_input %s%d from %s%d m %p data %p frame %p %s frmlen %lu\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if),
- ifnet_name(ifp), ifnet_unit(ifp),
- m, mbuf_data(m), *frame_ptr, included ? "inside" : "outside", frmlen);
-
- if (_if_brige_debug > 1) {
- printf_mbuf(m, "bridge_iff_input[", "\n");
- printf_ether_header((struct ether_header *)*frame_ptr);
- printf_mbuf_data(m, 0, 20);
- printf("\n");
- }
- }
-#endif /* BRIDGE_DEBUG */
-
- /* Move data pointer to start of frame to the link layer header */
- if (included) {
- (void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen, mbuf_len(m) + frmlen);
- (void) mbuf_pkthdr_adjustlen(m, frmlen);
- } else {
- printf("bridge_iff_input: frame_ptr outside mbuf\n");
- goto out;
- }
-
- error = bridge_input(bif, ifp, m, *frame_ptr);
-
- /* Adjust packet back to original */
- if (error == 0) {
- (void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen, mbuf_len(m) - frmlen);
- (void) mbuf_pkthdr_adjustlen(m, -frmlen);
- }
-#if BRIDGE_DEBUG
- if (_if_brige_debug > 1) {
- printf("\n");
- printf_mbuf(m, "bridge_iff_input]", "\n");
- }
-#endif /* BRIDGE_DEBUG */
-
-out:
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
-
- return error;
-}
-
-
-#if BRIDGE_MEMBER_OUT_FILTER
-static errno_t
-bridge_iff_output(void *cookie, ifnet_t ifp, __unused protocol_family_t protocol, mbuf_t *data)
-{
- errno_t error = 0;
- struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
- struct bridge_softc *sc = bif->bif_sc;
- mbuf_t m = *data;
-
- if ((m->m_flags & M_PROTO1))
- goto out;
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug) {
- printf("bridge_iff_output %s%d from %s%d m %p data %p\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if),
- ifnet_name(ifp), ifnet_unit(ifp),
- m, mbuf_data(m));
- }
-#endif /* BRIDGE_DEBUG */
-
- error = bridge_output(sc, ifp, m);
- if (error != 0) {
- printf("bridge_iff_output: bridge_output failed error %d\n", error);
- }
-
-out:
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
-
- return error;
-}
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
-
-
-static void
-bridge_iff_event(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
- const struct kev_msg *event_msg)
-{
- struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
-
- if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
- event_msg->kev_class == KEV_NETWORK_CLASS &&
- event_msg->kev_subclass == KEV_DL_SUBCLASS) {
- switch (event_msg->event_code) {
- case KEV_DL_IF_DETACHING:
- bridge_ifdetach(bif, ifp);
- break;
-
- default:
- break;
- }
- }
-}
-
-static void
-bridge_iff_detached(void* cookie, __unused ifnet_t interface)
-{
- struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
-
- _FREE(bif, M_DEVBUF);
-
- return;
-}
-
-/*
- * bridge_delete_member:
- *
- * Delete the specified member interface.
- */
-static void
-bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
-{
- struct ifnet *ifs = bif->bif_ifp;
-
- switch (ifnet_type(ifs)) {
- case IFT_ETHER:
- /*
- * Take the interface out of promiscuous mode.
- */
- (void) ifnet_set_promiscuous(ifs, 0);
- break;
-#if NGIF > 0
- case IFT_GIF:
- break;
-#endif
- default:
-#ifdef DIAGNOSTIC
- panic("bridge_delete_member: impossible");
-#endif
- break;
- }
-
- ifs->if_bridge = NULL;
- LIST_REMOVE(bif, bif_next);
-
- /* Respect lock ordering with DLIL lock */
- lck_mtx_unlock(sc->sc_mtx);
- iflt_detach(bif->bif_iff_ref);
- lck_mtx_lock(sc->sc_mtx);
-
- bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- /* On the last deleted interface revert the MTU */
-
- if (LIST_EMPTY(&sc->sc_iflist))
- (void) ifnet_set_mtu(sc->sc_if, ETHERMTU);
-}
-
-static int
-bridge_ioctl_add(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif = NULL;
- struct ifnet *ifs;
- int error = 0;
- /* APPLE MODIFICATION <cbz@apple.com> - is this a proxy sta being added? */
-#if IEEE80211_PROXYSTA
- struct bridge_rtnode *brt;
-#endif
-
- error = ifnet_find_by_name(req->ifbr_ifsname, &ifs);
- if (error || ifs == NULL)
- return (ENOENT);
-
- /* Is the interface already attached to this bridge interface */
- if (ifs->if_bridge == sc)
- return (EEXIST);
-
- if (ifs->if_bridge != NULL)
- return (EBUSY);
-
- /* First added interface resets the MTU */
-
- if (LIST_EMPTY(&sc->sc_iflist))
- (void) ifnet_set_mtu(sc->sc_if, ETHERMTU);
-
- if (ifnet_mtu(sc->sc_if) != ifnet_mtu(ifs))
- return (EINVAL);
-
- bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK|M_ZERO);
- if (bif == NULL)
- return (ENOMEM);
-
- bif->bif_ifp = ifs;
- bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
- bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
- bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
- bif->bif_sc = sc;
-
- switch (ifnet_type(ifs)) {
- case IFT_ETHER:
- /*
- * Place the interface into promiscuous mode.
- */
- error = ifnet_set_promiscuous(ifs, 1);
- if (error)
- goto out;
-#if HAS_IF_CAP
- bridge_mutecaps(bif, 1);
-#endif
- break;
-#if NGIF > 0
- case IFT_GIF:
- break;
-#endif
- default:
- error = EINVAL;
- goto out;
- }
-
- /*
- * If the LINK0 flag is set, and this is the first member interface,
- * attempt to inherit its link-layer address.
- */
- if ((ifnet_flags(sc->sc_if) & IFF_LINK0) && LIST_EMPTY(&sc->sc_iflist) &&
- ifnet_type(ifs) == IFT_ETHER) {
- (void) ifnet_set_lladdr(sc->sc_if, ifnet_lladdr(ifs),
- ETHER_ADDR_LEN);
- }
-
- // install an interface filter
- {
- struct iff_filter iff;
-
- memset(&iff, 0, sizeof(struct iff_filter));
-
- iff.iff_cookie = bif;
- iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
- iff.iff_input = bridge_iff_input;
-#if BRIDGE_MEMBER_OUT_FILTER
- iff.iff_output = bridge_iff_output;
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
- iff.iff_event = bridge_iff_event;
- iff.iff_detached = bridge_iff_detached;
-
- /* Respect lock ordering with DLIL lock */
- lck_mtx_unlock(sc->sc_mtx);
- error = iflt_attach(ifs, &iff, &bif->bif_iff_ref);
- lck_mtx_lock(sc->sc_mtx);
- if (error != 0) {
- printf("bridge_ioctl_add: iflt_attach failed %d\n", error);
- goto out;
- }
- }
- ifs->if_bridge = sc;
- LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
-
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
- else
- bstp_stop(sc);
-
- /* APPLE MODIFICATION <cbz@apple.com> - is this a proxy sta being added? */
-#if IEEE80211_PROXYSTA
- brt = bridge_rtnode_lookup(sc, ifnet_lladdr(ifs));
- if (brt) {
-#if DIAGNOSTIC
- printf( "%s: attach %s to bridge as proxysta for %02x:%02x:%02x:%02x:%02x:%02x discovered on %s\n",
- __func__, ifs->if_xname, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
- brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5], brt->brt_ifp->if_xname );
-#endif
- brt->brt_ifp_proxysta = ifs;
- }
-#endif
-
-
-out:
- if (error) {
- if (bif != NULL)
- _FREE(bif, M_DEVBUF);
- }
- return (error);
-}
-
-static int
-bridge_ioctl_del(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- bridge_delete_member(sc, bif);
-
- return (0);
-}
-
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-static int
-bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
- struct ifnet *ifs;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- ifs = bif->bif_ifp;
- bridge_rtpurge(sc, ifs);
-
- return (0);
-}
-#endif
-
-static int
-bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- req->ifbr_ifsflags = bif->bif_flags;
- req->ifbr_state = bif->bif_state;
- req->ifbr_priority = bif->bif_priority;
- req->ifbr_path_cost = bif->bif_path_cost;
- req->ifbr_portno = ifnet_index(bif->bif_ifp) & 0xffff;
-
- return (0);
-}
-
-static int
-bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- if (req->ifbr_ifsflags & IFBIF_STP) {
- switch (ifnet_type(bif->bif_ifp)) {
- case IFT_ETHER:
- /* These can do spanning tree. */
- break;
-
- default:
- /* Nothing else can. */
- return (EINVAL);
- }
- }
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- if ((bif->bif_flags & IFBIF_PROXYSTA_DISCOVER) &&
- ((req->ifbr_ifsflags & IFBIF_PROXYSTA_DISCOVER) == 0))
- bridge_rtpurge(sc, bif->bif_ifp);
-#endif
-
- bif->bif_flags = req->ifbr_ifsflags;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- if (bif->bif_flags & IFBIF_PROXYSTA_DISCOVER)
- bridge_rtdiscovery(sc);
-#endif
-
- return (0);
-}
-
-static int
-bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- sc->sc_brtmax = param->ifbrp_csize;
- bridge_rttrim(sc);
-
- return (0);
-}
-
-static int
-bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_csize = sc->sc_brtmax;
-
- return (0);
-}
-
-#define BRIDGE_IOCTL_GIFS \
- struct bridge_iflist *bif; \
- struct ifbreq breq; \
- int count, error = 0; \
- uint32_t len; \
- \
- count = 0; \
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) \
- count++; \
- \
- if (bifc->ifbic_len == 0) { \
- bifc->ifbic_len = sizeof(breq) * count; \
- return (0); \
- } \
- \
- count = 0; \
- len = bifc->ifbic_len; \
- memset(&breq, 0, sizeof breq); \
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { \
- if (len < sizeof(breq)) \
- break; \
- \
- snprintf(breq.ifbr_ifsname, sizeof(breq.ifbr_ifsname), "%s%d", \
- ifnet_name(bif->bif_ifp), ifnet_unit(bif->bif_ifp)); \
- breq.ifbr_ifsflags = bif->bif_flags; \
- breq.ifbr_state = bif->bif_state; \
- breq.ifbr_priority = bif->bif_priority; \
- breq.ifbr_path_cost = bif->bif_path_cost; \
- breq.ifbr_portno = ifnet_index(bif->bif_ifp) & 0xffff; \
- error = copyout(&breq, bifc->ifbic_req + count * sizeof(breq), sizeof(breq)); \
- if (error) \
- break; \
- count++; \
- len -= sizeof(breq); \
- } \
- \
- bifc->ifbic_len = sizeof(breq) * count
-
-
-static int
-bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
-{
- struct ifbifconf64 *bifc = arg;
-
- BRIDGE_IOCTL_GIFS;
-
- return (error);
-}
-
-static int
-bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
-{
- struct ifbifconf32 *bifc = arg;
-
- BRIDGE_IOCTL_GIFS;
-
- return (error);
-}
-
-#define BRIDGE_IOCTL_RTS \
- struct bridge_rtnode *brt; \
- int count = 0, error = 0; \
- uint32_t len; \
- struct timespec now; \
- \
- if (bac->ifbac_len == 0) \
- return (0); \
- \
- len = bac->ifbac_len; \
- LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
- if (len < sizeof(bareq)) \
- goto out; \
- memset(&bareq, 0, sizeof(bareq)); \
- snprintf(bareq.ifba_ifsname, sizeof(bareq.ifba_ifsname), "%s%d", \
- ifnet_name(brt->brt_ifp), ifnet_unit(brt->brt_ifp)); \
- memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); \
- if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
- nanouptime(&now); \
- if (brt->brt_expire >= (unsigned long)now.tv_sec) \
- bareq.ifba_expire = brt->brt_expire - now.tv_sec; \
- else \
- bareq.ifba_expire = 0; \
- } else \
- bareq.ifba_expire = 0; \
- bareq.ifba_flags = brt->brt_flags; \
- \
- error = copyout(&bareq, bac->ifbac_req + count * sizeof(bareq), sizeof(bareq)); \
- if (error) \
- goto out; \
- count++; \
- len -= sizeof(bareq); \
- } \
-out: \
- bac->ifbac_len = sizeof(bareq) * count
-
-
-static int
-bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
-{
- struct ifbaconf64 *bac = arg;
- struct ifbareq64 bareq;
-
- BRIDGE_IOCTL_RTS;
-
- return (error);
-}
-
-static int
-bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
-{
- struct ifbaconf32 *bac = arg;
- struct ifbareq32 bareq;
-
- BRIDGE_IOCTL_RTS;
-
- return (error);
-}
-
-static int
-bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
-{
- struct ifbareq64 *req = arg;
- struct bridge_iflist *bif;
- int error;
-
- bif = bridge_lookup_member(sc, req->ifba_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
- req->ifba_flags);
-
- return (error);
-}
-
-static int
-bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
-{
- struct ifbareq32 *req = arg;
- struct bridge_iflist *bif;
- int error;
-
- bif = bridge_lookup_member(sc, req->ifba_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
- req->ifba_flags);
-
- return (error);
-}
-
-static int
-bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- sc->sc_brttimeout = param->ifbrp_ctime;
-
- return (0);
-}
-
-static int
-bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_ctime = sc->sc_brttimeout;
-
- return (0);
-}
-
-static int
-bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
-{
- struct ifbareq64 *req = arg;
-
- return (bridge_rtdaddr(sc, req->ifba_dst));
-}
-
-static int
-bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
-{
- struct ifbareq32 *req = arg;
-
- return (bridge_rtdaddr(sc, req->ifba_dst));
-}
-
-static int
-bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
-
- bridge_rtflush(sc, req->ifbr_ifsflags);
-
- return (0);
-}
-
-static int
-bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_prio = sc->sc_bridge_priority;
-
- return (0);
-}
-
-static int
-bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- sc->sc_bridge_priority = param->ifbrp_prio;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-static int
-bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
-
- return (0);
-}
-
-static int
-bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- if (param->ifbrp_hellotime == 0)
- return (EINVAL);
- sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-static int
-bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
-
- return (0);
-}
-
-static int
-bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- if (param->ifbrp_fwddelay == 0)
- return (EINVAL);
- sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-static int
-bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
-
- return (0);
-}
-
-static int
-bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
-{
- struct ifbrparam *param = arg;
-
- if (param->ifbrp_maxage == 0)
- return (EINVAL);
- sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-static int
-bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- bif->bif_priority = req->ifbr_priority;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-static void
-bridge_proxysta_notify_macaddr(struct ifnet *ifp, int op, const uint8_t *mac)
-{
- struct proxy_sta_event iev;
-
- memset(&iev, 0, sizeof(iev));
- memcpy(iev.iev_addr, mac, ETHER_ADDR_LEN);
-
- rt_proxystamsg(ifp, op, &iev, sizeof(iev));
-}
-
-static void
-bridge_proxysta_discover(struct ifnet *ifp, const uint8_t *mac)
-{
- bridge_proxysta_notify_macaddr( ifp, RTM_PROXYSTA_DISCOVERY, mac );
-}
-
-static void
-bridge_proxysta_idle_timeout(struct ifnet *ifp, const uint8_t *mac)
-{
- bridge_proxysta_notify_macaddr( ifp, RTM_PROXYSTA_IDLE_TIMEOUT, mac );
-}
-#endif
-
-static int
-bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
-{
- struct ifbreq *req = arg;
- struct bridge_iflist *bif;
-
- bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- bif->bif_path_cost = req->ifbr_path_cost;
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING)
- bstp_initialization(sc);
-
- return (0);
-}
-
-/*
- * bridge_ifdetach:
- *
- * Detach an interface from a bridge. Called when a member
- * interface is detaching.
- */
-static void
-bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
-{
- struct bridge_softc *sc = bif->bif_sc;
- struct ifbreq breq;
-
- memset(&breq, 0, sizeof(breq));
- snprintf(breq.ifbr_ifsname, sizeof(breq.ifbr_ifsname), "%s%d",
- ifnet_name(ifp), ifnet_unit(ifp));
-
- lck_mtx_lock(sc->sc_mtx);
-
- (void) bridge_ioctl_del(sc, &breq);
-
- lck_mtx_unlock(sc->sc_mtx);
-}
-
-/*
- * bridge_init:
- *
- * Initialize a bridge interface.
- */
-static int
-bridge_init(struct ifnet *ifp)
-{
- struct bridge_softc *sc = ifnet_softc(ifp);
- struct timespec ts;
- errno_t error;
-
- if (ifnet_flags(ifp) & IFF_RUNNING)
- return (0);
-
- ts.tv_sec = bridge_rtable_prune_period;
- ts.tv_nsec = 0;
- bsd_timeout(bridge_timer, sc, &ts);
-
- error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);
- if (error == 0)
- bstp_initialization(sc);
-
- return error;
-}
-
-/*
- * bridge_stop:
- *
- * Stop the bridge interface.
- */
-static void
-bridge_stop(struct ifnet *ifp, __unused int disable)
-{
- struct bridge_softc *sc = ifnet_softc(ifp);
-
- if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
- return;
-
- bsd_untimeout(bridge_timer, sc);
- bstp_stop(sc);
-
- bridge_rtflush(sc, IFBF_FLUSHDYN);
-
- (void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
-}
-
-/*
- * bridge_enqueue:
- *
- * Enqueue a packet on a bridge member interface.
- *
- * Note: this is called both on the input and output path so this routine
- * cannot simply muck with the HW checksum flag. For the time being we
- * rely on the caller to do the right thing.
- */
-__private_extern__ void
-bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
-{
- int len, error;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf("bridge_enqueue sc %s%d to dst_ifp %s%d m %p\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if),
- ifnet_name(dst_ifp), ifnet_unit(dst_ifp), m);
-#endif /* BRIDGE_DEBUG */
-
- len = m->m_pkthdr.len;
- m->m_flags |= M_PROTO1; //set to avoid loops
-
- error = ifnet_output_raw(dst_ifp, 0, m);
- if (error == 0) {
- (void) ifnet_stat_increment_out(sc->sc_if, 1, len, 0);
- } else {
- (void) ifnet_stat_increment_out(sc->sc_if, 0, 0, 1);
- }
-
- return;
-}
-
-
-#if BRIDGE_MEMBER_OUT_FILTER
-
-/*
- * bridge_output:
- *
- * Send output from a bridge member interface. This
- * performs the bridging function for locally originated
- * packets.
- *
- * The mbuf has the Ethernet header already attached. We must
- * enqueue or free the mbuf before returning.
- */
-static int
-bridge_output(struct bridge_softc *sc, ifnet_t ifp, mbuf_t m)
-{
- struct ether_header *eh;
- struct ifnet *dst_if;
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf("bridge_output ifp %p %s%d\n", ifp, ifnet_name(ifp), ifnet_unit(ifp));
-#endif /* BRIDGE_DEBUG */
-
- if (m->m_len < ETHER_HDR_LEN) {
- m = m_pullup(m, ETHER_HDR_LEN);
- if (m == NULL) {
- printf("bridge_output ifp %p m_pullup failed\n", ifp);
- return EJUSTRETURN;
- }
- }
-
- eh = mtod(m, struct ether_header *);
-
- /* APPLE MODIFICATION <jhw@apple.com>
- * If the packet is an 802.1X ethertype, then only send on the
- * original output interface.
- */
- if (eh->ether_type == htons(ETHERTYPE_PAE)) {
- dst_if = ifp;
- goto sendunicast;
- }
-
- /*
- * If bridge is down, but the original output interface is up,
- * go ahead and send out that interface. Otherwise, the packet
- * is dropped below.
- */
- if ((ifnet_flags(sc->sc_if) & IFF_RUNNING) == 0) {
- dst_if = ifp;
- goto sendunicast;
- }
-
- lck_mtx_lock(sc->sc_mtx);
-
- /*
- * If the packet is a multicast, or we don't know a better way to
- * get there, send to all interfaces.
- */
- if (ETHER_IS_MULTICAST(eh->ether_dhost))
- dst_if = NULL;
- else
- dst_if = bridge_rtlookup(sc, eh->ether_dhost);
- if (dst_if == NULL) {
- struct bridge_iflist *bif;
- struct mbuf *mc;
- int used = 0;
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- dst_if = bif->bif_ifp;
- if ((ifnet_flags(dst_if) & IFF_RUNNING) == 0)
- continue;
-
- /*
- * If this is not the original output interface,
- * and the interface is participating in spanning
- * tree, make sure the port is in a state that
- * allows forwarding.
- */
- if (dst_if != ifp &&
- (bif->bif_flags & IFBIF_STP) != 0) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_BLOCKING:
- case BSTP_IFSTATE_LISTENING:
- case BSTP_IFSTATE_DISABLED:
- continue;
- }
- }
-
- if (LIST_NEXT(bif, bif_next) == NULL) {
- used = 1;
- mc = m;
- } else {
- mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
- if (mc == NULL) {
- printf("bridge_output ifp %p m_copym failed\n", ifp);
- (void) ifnet_stat_increment_out(sc->sc_if, 0, 0, 1);
- continue;
- }
- }
-
- bridge_enqueue(sc, dst_if, mc);
- }
- if (used == 0) {
- printf("bridge_output ifp %p not used\n", ifp);
- m_freem(m);
- }
- lck_mtx_unlock(sc->sc_mtx);
-
- return EJUSTRETURN;
- }
-
-sendunicast:
- /*
- * XXX Spanning tree consideration here?
- */
-
- if ((ifnet_flags(dst_if) & IFF_RUNNING) == 0) {
- printf("bridge_output ifp %p dst_if %p not running\n", ifp, dst_if);
- m_freem(m);
-
- return EJUSTRETURN;
- }
-
- if (dst_if != ifp) {
- lck_mtx_lock(sc->sc_mtx);
-
- bridge_enqueue(sc, dst_if, m);
-
- lck_mtx_unlock(sc->sc_mtx);
-
- return EJUSTRETURN;
- }
-
- return (0);
-}
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
-
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
-static struct mbuf* bridge_fix_txcsum( struct mbuf *m )
-{
- // basic tests indicate that the vast majority of packets being processed
- // here have an Ethernet header mbuf pre-pended to them (the first case below)
- // the second highest are those where the Ethernet and IP/TCP/UDP headers are
- // all in one mbuf (second case below)
- // the third case has, in fact, never hit for me -- although if I comment out
- // the first two cases, that code works for them, so I consider it a
- // decent general solution
-
- int amt = ETHER_HDR_LEN;
- int hlen = M_CSUM_DATA_IPv4_IPHL( m->m_pkthdr.csum_data );
- int off = M_CSUM_DATA_IPv4_OFFSET( m->m_pkthdr.csum_data );
-
- /*
- * NOTE we should never get vlan-attached packets here;
- * support for those COULD be added, but we don't use them
- * and it really kinda slows things down to worry about them
- */
-
-#ifdef DIAGNOSTIC
- if ( m_tag_find( m, PACKET_TAG_VLAN, NULL ) != NULL )
- {
- printf( "bridge: transmitting packet tagged with VLAN?\n" );
- KASSERT( 0 );
- m_freem( m );
- return NULL;
- }
-#endif
-
- if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
- {
- amt += hlen;
- }
- if ( m->m_pkthdr.csum_flags & M_CSUM_TCPv4 )
- {
- amt += off + sizeof( uint16_t );
- }
-
- if ( m->m_pkthdr.csum_flags & M_CSUM_UDPv4 )
- {
- amt += off + sizeof( uint16_t );
- }
-
- if ( m->m_len == ETHER_HDR_LEN )
- {
- // this is the case where there's an Ethernet header in an mbuf
-
- // the first mbuf is the Ethernet header -- just strip it off and do the checksum
- struct mbuf *m_ip = m->m_next;
-
- // set up m_ip so the cksum operations work
- /* APPLE MODIFICATION 22 Apr 2008 <mvega@apple.com>
- * <rdar://5817385> Clear the m_tag list before setting
- * M_PKTHDR.
- *
- * If this m_buf chain was extended via M_PREPEND(), then
- * m_ip->m_pkthdr is identical to m->m_pkthdr (see
- * M_MOVE_PKTHDR()). The only thing preventing access to this
- * invalid packet header data is the fact that the M_PKTHDR
- * flag is clear, i.e., m_ip->m_flag & M_PKTHDR == 0, but we're
- * about to set the M_PKTHDR flag, so to be safe we initialize,
- * more accurately, we clear, m_ip->m_pkthdr.tags via
- * m_tag_init().
- *
- * Suppose that we do not do this; if m_pullup(), below, fails,
- * then m_ip will be freed along with m_ip->m_pkthdr.tags, but
- * we will also free m soon after, via m_freem(), and
- * consequently attempt to free m->m_pkthdr.tags in the
- * process. The problem is that m->m_pkthdr.tags will have
- * already been freed by virtue of being equal to
- * m_ip->m_pkthdr.tags. Attempts to dereference
- * m->m_pkthdr.tags in m_tag_delete_chain() will result in a
- * panic.
- */
- m_tag_init(m_ip);
- /* END MODIFICATION */
- m_ip->m_flags |= M_PKTHDR;
- m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
- m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
- m_ip->m_pkthdr.len = m->m_pkthdr.len - ETHER_HDR_LEN;
-
- // set up the header mbuf so we can prepend it back on again later
- m->m_pkthdr.csum_flags = 0;
- m->m_pkthdr.csum_data = 0;
- m->m_pkthdr.len = ETHER_HDR_LEN;
- m->m_next = NULL;
-
-
- // now do the checksums we need -- first IP
- if ( m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4 )
- {
- // make sure the IP header (or at least the part with the cksum) is there
- m_ip = m_pullup( m_ip, sizeof( struct ip ) );
- if ( m_ip == NULL )
- {
- printf( "bridge: failed to flatten header\n ");
- m_freem( m );
- return NULL;
- }
-
- // now do the checksum
- {
- struct ip *ip = mtod( m_ip, struct ip* );
- ip->ip_sum = in_cksum( m_ip, hlen );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed IPv4 checksum\n" );
-#endif
- }
- }
-
- // now do a TCP or UDP delayed checksum
- if ( m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
- {
- in_delayed_cksum( m_ip );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
-#endif
- }
-
- // now attach the ethernet header back onto the IP packet
- m->m_next = m_ip;
- m->m_pkthdr.len += m_length( m_ip );
-
- // clear the M_PKTHDR flags on the ip packet (again, we re-attach later)
- m_ip->m_flags &= ~M_PKTHDR;
-
- // and clear any csum flags
- m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
- }
- else if ( m->m_len >= amt )
- {
- // everything fits in the first mbuf, so futz with m->m_data, m->m_len and m->m_pkthdr.len to
- // make it work
- m->m_len -= ETHER_HDR_LEN;
- m->m_data += ETHER_HDR_LEN;
- m->m_pkthdr.len -= ETHER_HDR_LEN;
-
- // now do the checksums we need -- first IP
- if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
- {
- struct ip *ip = mtod( m, struct ip* );
- ip->ip_sum = in_cksum( m, hlen );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed IPv4 checksum\n" );
-#endif
- }
-
- // now do a TCP or UDP delayed checksum
- if ( m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
- {
- in_delayed_cksum( m );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
-#endif
- }
-
- // now stick the ethernet header back on
- m->m_len += ETHER_HDR_LEN;
- m->m_data -= ETHER_HDR_LEN;
- m->m_pkthdr.len += ETHER_HDR_LEN;
-
- // and clear any csum flags
- m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
- }
- else
- {
- struct mbuf *m_ip;
-
- // general case -- need to simply split it off and deal
-
- // first, calculate how much needs to be made writable (we may have a read-only mbuf here)
- hlen = M_CSUM_DATA_IPv4_IPHL( m->m_pkthdr.csum_data );
-#if PARANOID
- off = M_CSUM_DATA_IPv4_OFFSET( m->m_pkthdr.csum_data );
-
- if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
- {
- amt += hlen;
- }
-
- if ( m->m_pkthdr.csum_flags & M_CSUM_TCPv4 )
- {
- amt += sizeof( struct tcphdr * );
- amt += off;
- }
-
- if ( m->m_pkthdr.csum_flags & M_CSUM_UDPv4 )
- {
- amt += sizeof( struct udphdr * );
- amt += off;
- }
-#endif
-
- // now split the ethernet header off of the IP packet (we'll re-attach later)
- m_ip = m_split( m, ETHER_HDR_LEN, M_NOWAIT );
- if ( m_ip == NULL )
- {
- printf( "bridge_fix_txcsum: could not split ether header\n" );
-
- m_freem( m );
- return NULL;
- }
-
-#if PARANOID
- // make sure that the IP packet is writable for the portion we need
- if ( m_makewritable( &m_ip, 0, amt, M_DONTWAIT ) != 0 )
- {
- printf( "bridge_fix_txcsum: could not make %d bytes writable\n", amt );
-
- m_freem( m );
- m_freem( m_ip );
- return NULL;
- }
-#endif
-
- m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
- m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
-
- m->m_pkthdr.csum_flags = 0;
- m->m_pkthdr.csum_data = 0;
-
- // now do the checksums we need -- first IP
- if ( m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4 )
- {
- // make sure the IP header (or at least the part with the cksum) is there
- m_ip = m_pullup( m_ip, sizeof( struct ip ) );
- if ( m_ip == NULL )
- {
- printf( "bridge: failed to flatten header\n ");
- m_freem( m );
- return NULL;
- }
-
- // now do the checksum
- {
- struct ip *ip = mtod( m_ip, struct ip* );
- ip->ip_sum = in_cksum( m_ip, hlen );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed IPv4 checksum\n" );
-#endif
- }
- }
-
- // now do a TCP or UDP delayed checksum
- if ( m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
- {
- in_delayed_cksum( m_ip );
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
-#endif
- }
-
- // now attach the ethernet header back onto the IP packet
- m->m_next = m_ip;
- m->m_pkthdr.len += m_length( m_ip );
-
- // clear the M_PKTHDR flags on the ip packet (again, we re-attach later)
- m_ip->m_flags &= ~M_PKTHDR;
-
- // and clear any csum flags
- m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
- }
-
- return m;
-}
-#endif
-
-/*
- * bridge_start:
- *
- * Start output on a bridge.
- */
-static errno_t
-bridge_start(ifnet_t ifp, mbuf_t m)
-{
- struct bridge_softc *sc = ifnet_softc(ifp);
- struct ether_header *eh;
- struct ifnet *dst_if;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
-
- eh = mtod(m, struct ether_header *);
-
- if ((m->m_flags & (M_BCAST|M_MCAST)) == 0 &&
- (dst_if = bridge_rtlookup(sc, eh->ether_dhost)) != NULL) {
-
- {
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
- /*
- * APPLE MODIFICATION - if the packet needs a checksum (i.e.,
- * checksum has been deferred for HW support) AND the destination
- * interface doesn't support HW checksums, then we
- * need to fix-up the checksum here
- */
- if (
- ( (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4) ) != 0 ) &&
- ( (dst_if->if_csum_flags_tx & m->m_pkthdr.csum_flags ) != m->m_pkthdr.csum_flags )
- )
- {
- m = bridge_fix_txcsum( m );
- if ( m == NULL )
- {
- goto done;
- }
- }
-
-#else
- if (eh->ether_type == htons(ETHERTYPE_IP))
- mbuf_outbound_finalize(m, PF_INET, sizeof(struct ether_header));
- else
- m->m_pkthdr.csum_flags = 0;
-#endif
- lck_mtx_lock(sc->sc_mtx);
- #if NBPFILTER > 0
- if (sc->sc_bpf_output)
- bridge_bpf_output(ifp, m);
- #endif
- bridge_enqueue(sc, dst_if, m);
- lck_mtx_unlock(sc->sc_mtx);
- }
- } else
- {
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
-
- /*
- * APPLE MODIFICATION - if the MULTICAST packet needs a checksum (i.e.,
- * checksum has been deferred for HW support) AND at least one destination
- * interface doesn't support HW checksums, then we go ahead and fix it up
- * here, since it doesn't make sense to do it more than once
- */
-
- if (
- (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4)) &&
- /*
- * XXX FIX ME: keep track of whether or not we have any interfaces that
- * do not support checksums (for now, assume we do)
- */
- ( 1 )
- )
- {
- m = bridge_fix_txcsum( m );
- if ( m == NULL )
- {
- goto done;
- }
- }
-#else
- if (eh->ether_type == htons(ETHERTYPE_IP))
- mbuf_outbound_finalize(m, PF_INET, sizeof(struct ether_header));
- else
- m->m_pkthdr.csum_flags = 0;
-#endif
-
- lck_mtx_lock(sc->sc_mtx);
- #if NBPFILTER > 0
- if (sc->sc_bpf_output)
- bridge_bpf_output(ifp, m);
- #endif
- bridge_broadcast(sc, ifp, m, 0);
- lck_mtx_unlock(sc->sc_mtx);
- }
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
-done:
-#endif
-
- return 0;
-}
-
-/*
- * bridge_forward:
- *
- * The forwarding function of the bridge.
- */
-static void
-bridge_forward(struct bridge_softc *sc, struct mbuf *m)
-{
- struct bridge_iflist *bif;
- struct ifnet *src_if, *dst_if;
- struct ether_header *eh;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf("bridge_forward %s%d m%p\n", ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if), m);
-#endif /* BRIDGE_DEBUG */
-
- src_if = m->m_pkthdr.rcvif;
-
- (void) ifnet_stat_increment_in(sc->sc_if, 1, m->m_pkthdr.len, 0);
-
- /*
- * Look up the bridge_iflist.
- */
- bif = bridge_lookup_member_if(sc, src_if);
- if (bif == NULL) {
- /* Interface is not a bridge member (anymore?) */
- m_freem(m);
- return;
- }
-
- /* APPLE MODIFICATION <cbz@apple.com> - add the ability to block forwarding of packets; for the guest network */
-#if ( APPLE_HAVE_80211_GUEST_NETWORK )
- if (bif->bif_flags & IFBIF_NO_FORWARDING) {
- /* Drop the packet and we're done. */
- m_freem(m);
- return;
- }
-#endif
-
- if (bif->bif_flags & IFBIF_STP) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_BLOCKING:
- case BSTP_IFSTATE_LISTENING:
- case BSTP_IFSTATE_DISABLED:
- m_freem(m);
- return;
- }
- }
-
- eh = mtod(m, struct ether_header *);
-
- /*
- * If the interface is learning, and the source
- * address is valid and not multicast, record
- * the address.
- */
- if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
- ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
- (eh->ether_shost[0] | eh->ether_shost[1] |
- eh->ether_shost[2] | eh->ether_shost[3] |
- eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
- (void) bridge_rtupdate(sc, eh->ether_shost,
- src_if, 0, IFBAF_DYNAMIC);
- }
-
- if ((bif->bif_flags & IFBIF_STP) != 0 &&
- bif->bif_state == BSTP_IFSTATE_LEARNING) {
- m_freem(m);
- return;
- }
-
- /*
- * At this point, the port either doesn't participate
- * in spanning tree or it is in the forwarding state.
- */
-
- /*
- * If the packet is unicast, destined for someone on
- * "this" side of the bridge, drop it.
- */
- if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
- /* APPLE MODIFICATION <cbz@apple.com> - if the packet came in on a proxy sta discovery interface,
- we need to not look up the node by DA of the packet; we need to look up the proxy sta which
- matches the SA. If it's not found yet, drop the packet. */
-#if IEEE80211_PROXYSTA
- if (bif->bif_flags & IFBIF_PROXYSTA_DISCOVER)
- {
- struct bridge_rtnode *brt;
- dst_if = NULL;
- brt = bridge_rtnode_lookup(sc, eh->ether_shost);
- if (brt) {
- dst_if = brt->brt_ifp_proxysta;
- }
- if (dst_if == NULL) {
- m_freem(m);
- return;
- }
- }
- else
-#endif
- dst_if = bridge_rtlookup(sc, eh->ether_dhost);
- if (src_if == dst_if) {
- m_freem(m);
- return;
- }
- } else {
- /* ...forward it to all interfaces. */
- sc->sc_if->if_imcasts++;
- dst_if = NULL;
- }
-
- /* APPLE MODIFICATION
- <rnewberry@apple.com> - this is now handled by bridge_input
- <cbz@apple.com> - turning this back on because all packets are not bpf_mtap'd
- equally. RSN Preauth were not getting through; we're
- conditionalizing this call on
- (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH))
- */
-#if 1
- if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH))
- {
- m->m_pkthdr.rcvif = sc->sc_if;
-#if NBPFILTER > 0
- if (sc->sc_bpf_input)
- bridge_bpf_input(sc->sc_if, m);
-#endif
- }
-#endif
-
- if (dst_if == NULL) {
-
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
- /*
- * Clear any in-bound checksum flags for this packet.
- */
- m->m_pkthdr.csum_flags = 0;
-#else
- mbuf_inbound_modified(m);
-#endif
-
- bridge_broadcast(sc, src_if, m, 1);
- return;
- }
-
- /*
- * At this point, we're dealing with a unicast frame
- * going to a different interface.
- */
- if ((ifnet_flags(dst_if) & IFF_RUNNING) == 0) {
- m_freem(m);
- return;
- }
- bif = bridge_lookup_member_if(sc, dst_if);
- if (bif == NULL) {
- /* Not a member of the bridge (anymore?) */
- m_freem(m);
- return;
- }
-
- if (bif->bif_flags & IFBIF_STP) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_DISABLED:
- case BSTP_IFSTATE_BLOCKING:
- m_freem(m);
- return;
- }
- }
-
-#if APPLE_BRIDGE_HWCKSUM_SUPPORT
- /*
- * Clear any in-bound checksum flags for this packet.
- */
- {
- m->m_pkthdr.csum_flags = 0;
- }
-#else
- mbuf_inbound_modified(m);
-#endif
-
- bridge_enqueue(sc, dst_if, m);
-}
-
-char * ether_ntop(char *, size_t , const u_char *);
-
-__private_extern__ char *
-ether_ntop(char *buf, size_t len, const u_char *ap)
-{
- snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
- ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
-
- return buf;
-}
-
-/*
- * bridge_input:
- *
- * Receive input from a member interface. Queue the packet for
- * bridging if it is not for us.
- */
-errno_t
-bridge_input(struct bridge_iflist *bif, struct ifnet *ifp, struct mbuf *m, void *frame_header)
-{
- struct ifnet *bifp;
- struct ether_header *eh;
- struct mbuf *mc;
- int is_for_us = 0;
- struct bridge_softc *sc = bif->bif_sc;
- struct bridge_iflist *brm;
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf("bridge_input: %s%d from %s%d m %p data %p\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if),
- ifnet_name(ifp), ifnet_unit(ifp),
- m, mbuf_data(m));
-#endif /* BRIDGE_DEBUG */
-
- if ((ifnet_flags(sc->sc_if) & IFF_RUNNING) == 0) {
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d not running passing along\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
- return 0;
- }
-
- /* Need to clear the promiscous flags otherwise it will be dropped by DLIL after processing filters */
- if ((mbuf_flags(m) & MBUF_PROMISC))
- mbuf_setflags_mask(m, 0, MBUF_PROMISC);
-
- lck_mtx_lock(sc->sc_mtx);
-
- bifp = sc->sc_if;
-
- /* Is it a good idea to reassign a new value to bif ? TBD */
- bif = bridge_lookup_member_if(sc, ifp);
- if (bif == NULL) {
- lck_mtx_unlock(sc->sc_mtx);
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d bridge_lookup_member_if failed\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
- return 0;
- }
-
- eh = (struct ether_header *)mbuf_data(m);
-
- /*
- * If the packet is for us, set the packets source as the
- * bridge, and return the packet back to ether_input for
- * local processing.
- */
- if (memcmp(eh->ether_dhost, ifnet_lladdr(bifp),
- ETHER_ADDR_LEN) == 0) {
-
- /* Mark the packet as arriving on the bridge interface */
- (void) mbuf_pkthdr_setrcvif(m, bifp);
- mbuf_pkthdr_setheader(m, frame_header);
-
- /*
- * If the interface is learning, and the source
- * address is valid and not multicast, record
- * the address.
- */
- if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
- ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
- (eh->ether_shost[0] | eh->ether_shost[1] |
- eh->ether_shost[2] | eh->ether_shost[3] |
- eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
- (void) bridge_rtupdate(sc, eh->ether_shost,
- ifp, 0, IFBAF_DYNAMIC);
- }
-
-#if NBPFILTER > 0
- if (sc->sc_bpf_input)
- bridge_bpf_input(bifp, m);
-#endif
-
- (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, mbuf_len(m) - ETHER_HDR_LEN);
- (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
-
- (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
-
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d packet for bridge\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- dlil_input_packet_list(bifp, m);
-
- return EJUSTRETURN;
- }
-
- /*
- * if the destination of the packet is for the MAC address of
- * the member interface itself, then we don't need to forward
- * it -- just pass it back. Note that it'll likely just be
- * dropped by the stack, but if something else is bound to
- * the interface directly (for example, the wireless stats
- * protocol -- although that actually uses BPF right now),
- * then it will consume the packet
- *
- * ALSO, note that we do this check AFTER checking for the
- * bridge's own MAC address, because the bridge may be
- * using the SAME MAC address as one of its interfaces
- */
- if (memcmp(eh->ether_dhost, ifnet_lladdr(ifp),
- ETHER_ADDR_LEN) == 0) {
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- if ((bif->bif_flags & IFBIF_PROXYSTA) == 0) {
-#endif
-
-#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf("bridge_input: not forwarding packet bound for member interface\n" );
-#endif
- lck_mtx_unlock(sc->sc_mtx);
- return 0;
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- }
-#if VERY_VERY_VERY_DIAGNOSTIC
- else {
- printf( "%s: pkt rx on %s [proxysta iface], da is %02x:%02x:%02x:%02x:%02x:%02x\n",
- __func__, ifp->if_xname, eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
- eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5] );
- }
-#endif
-#endif
- }
-
- if ((m->m_flags & (M_BCAST|M_MCAST))) {
- struct ifmultiaddr *ifma = NULL;
-
- if ((m->m_flags & M_BCAST)) {
- is_for_us = 1;
- } else {
-#if BRIDGE_DEBUG
- printf("mulicast: %02x:%02x:%02x:%02x:%02x:%02x\n",
- eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
- eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5]);
-
- for (ifma = bifp->if_multiaddrs.lh_first; ifma;
- ifma = ifma->ifma_link.le_next) {
-
- if (ifma->ifma_addr == NULL)
- printf(" <none> ");
- else if (ifma->ifma_addr->sa_family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)ifma->ifma_addr;
-
- printf(" %u.%u.%u.%u ",
- (sin->sin_addr.s_addr & 0xff000000) >> 24,
- (sin->sin_addr.s_addr & 0x00ff0000) >> 16,
- (sin->sin_addr.s_addr & 0x0000ff00) >> 8,
- (sin->sin_addr.s_addr & 0x000000ff));
- }
- if (!ifma->ifma_ll || !ifma->ifma_ll->ifma_addr)
- printf("<none>\n");
- else {
- struct sockaddr_dl *sdl = (struct sockaddr_dl *)ifma->ifma_ll->ifma_addr;
-
- printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
- CONST_LLADDR(sdl)[0], CONST_LLADDR(sdl)[1], CONST_LLADDR(sdl)[2],
- CONST_LLADDR(sdl)[3], CONST_LLADDR(sdl)[4], CONST_LLADDR(sdl)[5]);
-
- }
- }
-#endif /* BRIDGE_DEBUG */
-
- /*
- * the upper layer of the stack have attached a list of multicast addresses to the bridge itself
- * (for example, the IP stack has bound 01:00:5e:00:00:01 to the 224.0.0.1 all hosts address), since
- * the IP stack is bound to the bridge. so we need to see if the packets arriving here SHOULD be
- * passed up as coming from the bridge.
- *
- * furthermore, since we know the IP stack is attached to the bridge, and NOTHING is attached
- * to the underlying devices themselves, we can drop packets that don't need to go up (by returning NULL
- * from bridge_input to the caller) after we forward the packet to other interfaces
- */
-
- for (ifma = bifp->if_multiaddrs.lh_first; ifma;
- ifma = ifma->ifma_link.le_next) {
- if (ifma->ifma_ll && ifma->ifma_ll->ifma_addr) {
- struct sockaddr_dl *sdl = (struct sockaddr_dl *)ifma->ifma_ll->ifma_addr;
-
- if (memcmp(eh->ether_dhost, CONST_LLADDR(sdl), ETHER_ADDR_LEN) == 0)
- break;
- }
- }
- if (ifma != NULL) {
- /* this packet matches the bridge's own filter, so pass it up as coming from us */
-
- /* Mark the packet as arriving on the bridge interface */
- // don't do this until AFTER we forward the packet -- bridge_forward uses this information
- //m->m_pkthdr.rcvif = bifp;
-
- /* keep track of this to help us decide about forwarding */
- is_for_us = 1;
-
-#if BRIDGE_DEBUG
- char addr[sizeof("XX:XX:XX:XX:XX:XX")+1];
- printf( "bridge_input: multicast frame for us (%s)\n",
- ether_ntop(addr, sizeof(addr), eh->ether_dhost) );
-#endif
- } else {
-#if BRIDGE_DEBUG
- char addr[sizeof("XX:XX:XX:XX:XX:XX")+1];
- printf( "bridge_input: multicast frame for unbound address (%s), forwarding but not passing to stack\n",
- ether_ntop(addr, sizeof(addr), eh->ether_dhost) );
-#endif
- }
- }
- /* Tap off 802.1D packets; they do not get forwarded. */
- if (memcmp(eh->ether_dhost, bstp_etheraddr,
- ETHER_ADDR_LEN) == 0) {
- m = bstp_input(sc, ifp, m);
- if (m == NULL) {
- lck_mtx_unlock(sc->sc_mtx);
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d mcast BSTP not forwarded\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
- return EJUSTRETURN;
- }
- }
-
- if (bif->bif_flags & IFBIF_STP) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_BLOCKING:
- case BSTP_IFSTATE_LISTENING:
- case BSTP_IFSTATE_DISABLED:
- {
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d mcast bridge not learning or forwarding \n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- m_freem(m);
- return EJUSTRETURN;
- }
- }
- }
-
- /*
- * If the interface is learning, and the source
- * address is valid and not multicast, record
- * the address.
- */
- if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
- ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
- (eh->ether_shost[0] | eh->ether_shost[1] |
- eh->ether_shost[2] | eh->ether_shost[3] |
- eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
- (void) bridge_rtupdate(sc, eh->ether_shost,
- ifp, 0, IFBAF_DYNAMIC);
- }
-
- if (is_for_us) {
- /*
- * Make a deep copy of the packet and enqueue the copy
- * for bridge processing; return the original packet for
- * local processing.
- */
- mc = m_dup(m, M_NOWAIT);
- if (mc == NULL) {
-#ifdef DIAGNOSTIC
- printf( "bridge_input: failed to duplicate multicast frame, not forwarding\n" );
-#endif
-#if BRIDGE_DEBUG
- } else {
- if (_if_brige_debug) {
- printf_mbuf(mc, "mc for us: ", "\n");
- printf_mbuf_data(m, 0, 20);
- printf("\n");
- }
-#endif /* BRIDGE_DEBUG */
- }
- } else {
- /*
- * we'll just pass the original, since we don't need to pass it
- * up the stack
- */
- mc = m;
- }
-
- /* Perform the bridge forwarding function with the copy. */
- if (mc != NULL) {
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d mcast forwarding \n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
- bridge_forward(sc, mc);
- }
-
- // TBD should have an option for type of bridge
-#if 0
- /*
- * Reinject the mbuf as arriving on the bridge so we have a
- * chance at claiming multicast packets. We can not loop back
- * here from ether_input as a bridge is never a member of a
- * bridge.
- */
- if (bifp->if_bridge != NULL)
- panic("brige_input: brige %p in a bridge %p\n", bifp, bifp->if_bridge);
- mc = m_dup(m, M_NOWAIT);
- if (mc != NULL) {
- mc->m_pkthdr.rcvif = bifp;
-#if NBPFILTER > 0
- if (sc->sc_bpf_input)
- bridge_bpf_input(bifp, mc);
-#endif
- }
-#endif
- /* Return the original packet for local processing. */
- if ( !is_for_us )
- {
- /* we don't free the packet -- bridge_forward already did so */
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d mcast local processing\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif
-
- return EJUSTRETURN;
- }
-
- // mark packet as arriving on the bridge
- m->m_pkthdr.rcvif = bifp;
- m->m_pkthdr.header = mbuf_data(m);
-
-#if NBPFILTER > 0
- if (sc->sc_bpf_input)
- bridge_bpf_input(bifp, m);
-#endif
- (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, mbuf_len(m) - ETHER_HDR_LEN);
- (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
-
- (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
-
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d mcast for us\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- dlil_input_packet_list(bifp, m);
-
- return EJUSTRETURN;
- }
-
- if (bif->bif_flags & IFBIF_STP) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_BLOCKING:
- case BSTP_IFSTATE_LISTENING:
- case BSTP_IFSTATE_DISABLED:
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d ucast bridge not learning or forwarding \n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- m_freem(m);
- return EJUSTRETURN;
- }
- }
-
- /* this code is not needed for Apple's bridge where the stack attaches directly */
-#if 1 /* TBD should be an option */
- /*
- * Unicast. Make sure it's not for us.
- */
- LIST_FOREACH(brm, &sc->sc_iflist, bif_next) {
- if(ifnet_type(brm->bif_ifp) != IFT_ETHER)
- continue;
-
- /* It is destined for us. */
- if (memcmp(ifnet_lladdr(brm->bif_ifp), eh->ether_dhost,
- ETHER_ADDR_LEN) == 0) {
- if (brm->bif_flags & IFBIF_LEARNING)
- (void) bridge_rtupdate(sc,
- eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
- m->m_pkthdr.rcvif = brm->bif_ifp;
- m->m_pkthdr.header = mbuf_data(m);
-
- (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, mbuf_len(m) - ETHER_HDR_LEN);
- (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
-#if BRIDGE_SUPPORT_GIF
-#if NGIF > 0
- if (ifnet_type(ifp) == IFT_GIF) {
- m->m_flags |= M_PROTO1;
- m->m_pkthdr.rcvif = brm->bif_ifp;
- (*brm->bif_ifp->if_input)(brm->bif_ifp, m);
- m = NULL;
- }
-#endif
-#endif
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d ucast to member %s%d\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if),
- ifnet_name(brm->bif_ifp), ifnet_unit(brm->bif_ifp));
-#endif /* BRIDGE_DEBUG */
-
- dlil_input_packet_list(brm->bif_ifp, m);
-
- return EJUSTRETURN;
- }
-
- /* We just received a packet that we sent out. */
- if (memcmp(ifnet_lladdr(brm->bif_ifp), eh->ether_shost,
- ETHER_ADDR_LEN) == 0) {
- lck_mtx_unlock(sc->sc_mtx);
-
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d ucast drop packet we sent out\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- m_freem(m);
- return EJUSTRETURN;
- }
- }
-#endif
-
- /*
- * If the interface is learning, and the source
- * address is valid and not multicast, record
- * the address.
- */
- if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
- ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
- (eh->ether_shost[0] | eh->ether_shost[1] |
- eh->ether_shost[2] | eh->ether_shost[3] |
- eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
- (void) bridge_rtupdate(sc, eh->ether_shost,
- ifp, 0, IFBAF_DYNAMIC);
- }
-
- /* Perform the bridge forwarding function. */
-#if BRIDGE_DEBUG
- if (_if_brige_debug)
- printf( "bridge_input: %s%d ucast forwarding\n",
- ifnet_name(sc->sc_if), ifnet_unit(sc->sc_if));
-#endif /* BRIDGE_DEBUG */
-
- bridge_forward(sc, m);
- lck_mtx_unlock(sc->sc_mtx);
- return EJUSTRETURN;
-}
-
-/*
- * bridge_broadcast:
- *
- * Send a frame to all interfaces that are members of
- * the bridge, except for the one on which the packet
- * arrived.
- */
-static void
-bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
- struct mbuf *m, __unused int runfilt)
-{
- struct bridge_iflist *bif;
- struct mbuf *mc;
- struct ifnet *dst_if;
- int used = 0;
-
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
- LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
- dst_if = bif->bif_ifp;
- if (dst_if == src_if)
- continue;
-
- if (bif->bif_flags & IFBIF_STP) {
- switch (bif->bif_state) {
- case BSTP_IFSTATE_BLOCKING:
- case BSTP_IFSTATE_DISABLED:
- continue;
- }
- }
-
- if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
- (m->m_flags & (M_BCAST|M_MCAST)) == 0)
- continue;
-
- if ((ifnet_flags(dst_if) & IFF_RUNNING) == 0)
- continue;
-
- if (LIST_NEXT(bif, bif_next) == NULL) {
- mc = m;
- used = 1;
- } else {
- mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
- if (mc == NULL) {
- (void) ifnet_stat_increment_out(sc->sc_if, 0, 0, 1);
- continue;
- }
- }
-
- bridge_enqueue(sc, dst_if, mc);
- }
- if (used == 0)
- m_freem(m);
-}
-
-/*
- * bridge_rtupdate:
- *
- * Add a bridge routing entry.
- */
-static int
-bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
- struct ifnet *dst_if, int setflags, uint8_t flags)
-{
- struct bridge_rtnode *brt;
- int error;
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- struct bridge_iflist *bif;
- int is_pds; /* are we a proxy sta discovery interface? */
-#endif
- struct timespec now;
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA - is this an interface
- we want to do proxy sta discovery on? */
-#if IEEE80211_PROXYSTA
- bif = bridge_lookup_member_if(sc, dst_if);
- if ((bif) && (bif->bif_flags & IFBIF_PROXYSTA_DISCOVER)) {
- is_pds = 1;
- }
- else {
- is_pds = 0;
- }
-#endif
- /*
- * A route for this destination might already exist. If so,
- * update it, otherwise create a new one.
- */
- if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- /* don't count this address against the bridge cache (well, allow proxy stas to double that
- number...put *some* boundary on it.) if we are a proxy sta discovery interface */
- if (is_pds) {
- if (sc->sc_brtcnt >= (sc->sc_brtmax+sc->sc_brtmax_proxysta))
- return (ENOSPC);
- }
- else
-#endif
- if (sc->sc_brtcnt >= sc->sc_brtmax)
- return (ENOSPC);
-
- /*
- * Allocate a new bridge forwarding node, and
- * initialize the expiration time and Ethernet
- * address.
- */
- brt = zalloc_noblock(bridge_rtnode_pool);
- if (brt == NULL)
- return (ENOMEM);
-
- memset(brt, 0, sizeof(*brt));
- nanouptime(&now);
- brt->brt_expire = now.tv_sec + sc->sc_brttimeout;
- brt->brt_flags = IFBAF_DYNAMIC;
- memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA - is this an interface
- we want to do proxy sta discovery on? If so, post a monitoring event */
-#if IEEE80211_PROXYSTA
- if (is_pds) {
- brt->brt_flags_ext |= IFBAF_EXT_PROXYSTA;
-#if DIAGNOSTIC
- printf( "%s: proxysta %02x:%02x:%02x:%02x:%02x:%02x on %s; discovery\n",
- __func__, dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], dst_if->if_xname );
-#endif
- bridge_proxysta_discover( dst_if, dst );
- }
-#endif
-
- if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
- zfree(bridge_rtnode_pool, brt);
- return (error);
- }
- }
-
- brt->brt_ifp = dst_if;
- if (setflags) {
- brt->brt_flags = flags;
- brt->brt_expire = (flags & IFBAF_STATIC) ? 0 :
- now.tv_sec + sc->sc_brttimeout;
- }
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA - */
-#if IEEE80211_PROXYSTA
- if (is_pds) {
-#if VERY_VERY_DIAGNOSTIC
- printf( "%s: proxysta %02x:%02x:%02x:%02x:%02x:%02x on %s; reset timeout\n",
- __func__, dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], dst_if->if_xname );
-#endif
- brt->brt_expire = (flags & IFBAF_STATIC) ? 0 :
- now.tv_sec + sc->sc_brttimeout;
- }
-#endif
-
- return (0);
-}
-
-/*
- * bridge_rtlookup:
- *
- * Lookup the destination interface for an address.
- */
-static struct ifnet *
-bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
-{
- struct bridge_rtnode *brt;
-
- if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
- return (NULL);
-
- return (brt->brt_ifp);
-}
-
-/*
- * bridge_rttrim:
- *
- * Trim the routine table so that we have a number
- * of routing entries less than or equal to the
- * maximum number.
- */
-static void
-bridge_rttrim(struct bridge_softc *sc)
-{
- struct bridge_rtnode *brt, *nbrt;
-
- /* Make sure we actually need to do this. */
- if (sc->sc_brtcnt <= sc->sc_brtmax)
- return;
-
- /* Force an aging cycle; this might trim enough addresses. */
- bridge_rtage(sc);
- if (sc->sc_brtcnt <= sc->sc_brtmax)
- return;
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
- bridge_rtnode_destroy(sc, brt);
- if (sc->sc_brtcnt <= sc->sc_brtmax)
- return;
- }
- }
-}
-
-/*
- * bridge_timer:
- *
- * Aging timer for the bridge.
- */
-static void
-bridge_timer(void *arg)
-{
- struct bridge_softc *sc = arg;
- struct timespec ts;
-
- lck_mtx_lock(sc->sc_mtx);
-
- bridge_rtage(sc);
-
- lck_mtx_unlock(sc->sc_mtx);
-
- if (ifnet_flags(sc->sc_if) & IFF_RUNNING) {
- ts.tv_sec = bridge_rtable_prune_period;
- ts.tv_nsec = 0;
- bsd_timeout(bridge_timer, sc, &ts);
- }
-}
-
-/*
- * bridge_rtage:
- *
- * Perform an aging cycle.
- */
-static void
-bridge_rtage(struct bridge_softc *sc)
-{
- struct bridge_rtnode *brt, *nbrt;
- struct timespec now;
-
- nanouptime(&now);
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
- if ((unsigned long)now.tv_sec >= brt->brt_expire)
- bridge_rtnode_destroy(sc, brt);
- }
- }
-}
-
-/*
- * bridge_rtflush:
- *
- * Remove all dynamic addresses from the bridge.
- */
-static void
-bridge_rtflush(struct bridge_softc *sc, int full)
-{
- struct bridge_rtnode *brt, *nbrt;
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
- bridge_rtnode_destroy(sc, brt);
- }
-}
-
-/* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
-/*
- * bridge_rtdiscovery:
- *
- */
-static void
-bridge_rtdiscovery(struct bridge_softc *sc)
-{
- struct bridge_rtnode *brt, *nbrt;
- struct bridge_iflist *bif;
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- bif = bridge_lookup_member_if(sc, brt->brt_ifp);
- if ((bif) && (bif->bif_flags & IFBIF_PROXYSTA_DISCOVER) &&
- ((brt->brt_flags_ext & IFBAF_EXT_PROXYSTA) == 0)) {
-#if DIAGNOSTIC
- printf( "%s: proxysta %02x:%02x:%02x:%02x:%02x:%02x on %s; found before IFBIF_PROXYSTA_DISCOVER\n",
- __func__, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2], brt->brt_addr[3],
- brt->brt_addr[4], brt->brt_addr[5], brt->brt_ifp->if_xname );
-#endif
- brt->brt_flags_ext |= IFBAF_EXT_PROXYSTA;
- }
-
- if (brt->brt_ifp_proxysta == NULL) {
-#if DIAGNOSTIC
- printf( "%s: proxysta %02x:%02x:%02x:%02x:%02x:%02x on %s; discovery\n",
- __func__, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2], brt->brt_addr[3],
- brt->brt_addr[4], brt->brt_addr[5], brt->brt_ifp->if_xname );
-#endif
- bridge_proxysta_discover( brt->brt_ifp, brt->brt_addr );
- }
- }
-}
-
-/*
- * bridge_rtpurge:
- *
- * Remove all dynamic addresses from a specific interface on the bridge.
- */
-static void
-bridge_rtpurge(struct bridge_softc *sc, struct ifnet *ifs)
-{
- struct bridge_rtnode *brt, *nbrt;
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- if (brt->brt_ifp == ifs) {
-#if DIAGNOSTIC
- printf( "%s: purge %s [%02x:%02x:%02x:%02x:%02x:%02x] discovered on %s\n",
- __func__, brt->brt_ifp_proxysta ? brt->brt_ifp_proxysta->if_xname : brt->brt_ifp->if_xname,
- brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
- brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5], brt->brt_ifp->if_xname );
-#endif
- bridge_rtnode_destroy(sc, brt);
- }
- }
-}
-#endif
-
-/*
- * bridge_rtdaddr:
- *
- * Remove an address from the table.
- */
-static int
-bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
-{
- struct bridge_rtnode *brt;
-
- if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
- return (ENOENT);
-
- bridge_rtnode_destroy(sc, brt);
- return (0);
-}
-
-/*
- * bridge_rtdelete:
- *
- * Delete routes to a speicifc member interface.
- */
-__private_extern__ void
-bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
-{
- struct bridge_rtnode *brt, *nbrt;
-
- for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
- nbrt = LIST_NEXT(brt, brt_list);
- if (brt->brt_ifp == ifp && (full ||
- (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
- bridge_rtnode_destroy(sc, brt);
- }
-}
-
-/*
- * bridge_rtable_init:
- *
- * Initialize the route table for this bridge.
- */
-static int
-bridge_rtable_init(struct bridge_softc *sc)
-{
- int i;
-
- sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
- M_DEVBUF, M_WAITOK);
- if (sc->sc_rthash == NULL)
- return (ENOMEM);
-
- for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
- LIST_INIT(&sc->sc_rthash[i]);
-
- sc->sc_rthash_key = random();
-
- LIST_INIT(&sc->sc_rtlist);
-
- return (0);
-}
-
-/*
- * bridge_rtable_fini:
- *
- * Deconstruct the route table for this bridge.
- */
-static void
-bridge_rtable_fini(struct bridge_softc *sc)
-{
-
- _FREE(sc->sc_rthash, M_DEVBUF);
-}
-
-/*
- * The following hash function is adapted from "Hash Functions" by Bob Jenkins
- * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
- */
-#define mix(a, b, c) \
-do { \
-a -= b; a -= c; a ^= (c >> 13); \
-b -= c; b -= a; b ^= (a << 8); \
-c -= a; c -= b; c ^= (b >> 13); \
-a -= b; a -= c; a ^= (c >> 12); \
-b -= c; b -= a; b ^= (a << 16); \
-c -= a; c -= b; c ^= (b >> 5); \
-a -= b; a -= c; a ^= (c >> 3); \
-b -= c; b -= a; b ^= (a << 10); \
-c -= a; c -= b; c ^= (b >> 15); \
-} while (/*CONSTCOND*/0)
-
-static uint32_t
-bridge_rthash(__unused struct bridge_softc *sc, const uint8_t *addr)
-{
- /* APPLE MODIFICATION - wasabi performance improvment - simplify the hash algorithm */
-#if 0
- uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
-
- b += addr[5] << 8;
- b += addr[4];
- a += addr[3] << 24;
- a += addr[2] << 16;
- a += addr[1] << 8;
- a += addr[0];
-
- mix(a, b, c);
-
- return (c & BRIDGE_RTHASH_MASK);
-#else
- return addr[5];
-#endif
-}
-
-#undef mix
-
-/*
- * bridge_rtnode_lookup:
- *
- * Look up a bridge route node for the specified destination.
- */
-static struct bridge_rtnode *
-bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
-{
- struct bridge_rtnode *brt;
- uint32_t hash;
- int dir;
-
- hash = bridge_rthash(sc, addr);
- LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
- dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
- if (dir == 0)
- return (brt);
- if (dir > 0)
- return (NULL);
- }
-
- return (NULL);
-}
-
-/*
- * bridge_rtnode_insert:
- *
- * Insert the specified bridge node into the route table. We
- * assume the entry is not already in the table.
- */
-static int
-bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
-{
- struct bridge_rtnode *lbrt;
- uint32_t hash;
- int dir;
-
- hash = bridge_rthash(sc, brt->brt_addr);
-
- lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
- if (lbrt == NULL) {
- LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
- goto out;
- }
-
- do {
- dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
- if (dir == 0)
- return (EEXIST);
- if (dir > 0) {
- LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
- goto out;
- }
- if (LIST_NEXT(lbrt, brt_hash) == NULL) {
- LIST_INSERT_AFTER(lbrt, brt, brt_hash);
- goto out;
- }
- lbrt = LIST_NEXT(lbrt, brt_hash);
- } while (lbrt != NULL);
-
-#ifdef DIAGNOSTIC
- panic("bridge_rtnode_insert: impossible");
-#endif
-
-out:
- LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
- sc->sc_brtcnt++;
-
- return (0);
-}
-
-/*
- * bridge_rtnode_destroy:
- *
- * Destroy a bridge rtnode.
- */
-static void
-bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
-{
- lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
-
- /* APPLE MODIFICATION <cbz@apple.com> - add support for Proxy STA */
-#if IEEE80211_PROXYSTA
- if (brt->brt_flags_ext & IFBAF_EXT_PROXYSTA) {
-#if DIAGNOSTIC
- printf( "%s: proxysta %02x:%02x:%02x:%02x:%02x:%02x %s from %s; idle timeout\n",
- __func__, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
- brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5],
- brt->brt_ifp_proxysta ? brt->brt_ifp_proxysta->if_xname : "unknown",
- brt->brt_ifp->if_xname );
-#endif
- bridge_proxysta_idle_timeout( brt->brt_ifp, brt->brt_addr );
- }
-#endif
-
- LIST_REMOVE(brt, brt_hash);
-
- LIST_REMOVE(brt, brt_list);
- sc->sc_brtcnt--;
- zfree(bridge_rtnode_pool, brt);
-}
-
-static errno_t
-bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- //printf("bridge_set_bpf_tap ifp %p mode %d\n", ifp, mode);
-
- /* TBD locking */
- if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
- return ENODEV;
- }
-
- switch (mode) {
- case BPF_TAP_DISABLE:
- sc->sc_bpf_input = sc->sc_bpf_output = NULL;
- break;
-
- case BPF_TAP_INPUT:
- sc->sc_bpf_input = bpf_callback;
- break;
-
- case BPF_TAP_OUTPUT:
- sc->sc_bpf_output = bpf_callback;
- break;
-
- case BPF_TAP_INPUT_OUTPUT:
- sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
-static void
-bridge_detach(__unused ifnet_t ifp)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- /* Tear down the routing table. */
- bridge_rtable_fini(sc);
-
- lck_rw_lock_exclusive(bridge_list_lock);
- LIST_REMOVE(sc, sc_list);
- lck_rw_done(bridge_list_lock);
-
- ifnet_release(ifp);
-
- lck_mtx_free(sc->sc_mtx, bridge_lock_grp);
-
- _FREE(sc, M_DEVBUF);
- return;
-}
-
-__private_extern__ errno_t bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- if (sc->sc_bpf_input) {
- if (mbuf_pkthdr_rcvif(m) != ifp)
- printf("bridge_bpf_input rcvif: %p != ifp %p\n", mbuf_pkthdr_rcvif(m), ifp);
- (*sc->sc_bpf_input)(ifp, m);
- }
- return 0;
-}
-
-__private_extern__ errno_t bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- if (sc->sc_bpf_output) {
- (*sc->sc_bpf_output)(ifp, m);
- }
- return 0;
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2004-2009 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-/* $apfw: if_bridgevar,v 1.7 2008/10/24 02:34:06 cbzimmer Exp $ */
-/* $NetBSD: if_bridgevar.h,v 1.8 2005/12/10 23:21:38 elad Exp $ */
-
-/*
- * Copyright 2001 Wasabi Systems, Inc.
- * All rights reserved.
- *
- * Written by Jason R. Thorpe for Wasabi Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed for the NetBSD Project by
- * Wasabi Systems, Inc.
- * 4. The name of Wasabi Systems, Inc. may not be used to endorse
- * or promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Jason L. Wright
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * OpenBSD: if_bridge.h,v 1.14 2001/03/22 03:48:29 jason Exp
- */
-
-/*
- * Data structure and control definitions for bridge interfaces.
- */
-
-#ifndef _NET_IF_BRIDGEVAR_H_
-#define _NET_IF_BRIDGEVAR_H_
-
-#ifdef PRIVATE
-
-#include <sys/queue.h>
-
-#include <net/if.h>
-#include <net/ethernet.h>
-
-/*
- * Commands used in the SIOCSDRVSPEC ioctl. Note the lookup of the
- * bridge interface itself is keyed off the ifdrv structure.
- */
-#define BRDGADD 0 /* add bridge member (ifbreq) */
-#define BRDGDEL 1 /* delete bridge member (ifbreq) */
-#define BRDGGIFFLGS 2 /* get member if flags (ifbreq) */
-#define BRDGSIFFLGS 3 /* set member if flags (ifbreq) */
-#define BRDGSCACHE 4 /* set cache size (ifbrparam) */
-#define BRDGGCACHE 5 /* get cache size (ifbrparam) */
-#define BRDGGIFS 6 /* get member list (ifbifconf) */
-#define BRDGRTS 7 /* get address list (ifbaconf) */
-#define BRDGSADDR 8 /* set static address (ifbareq) */
-#define BRDGSTO 9 /* set cache timeout (ifbrparam) */
-#define BRDGGTO 10 /* get cache timeout (ifbrparam) */
-#define BRDGDADDR 11 /* delete address (ifbareq) */
-#define BRDGFLUSH 12 /* flush address cache (ifbreq) */
-
-#define BRDGGPRI 13 /* get priority (ifbrparam) */
-#define BRDGSPRI 14 /* set priority (ifbrparam) */
-#define BRDGGHT 15 /* get hello time (ifbrparam) */
-#define BRDGSHT 16 /* set hello time (ifbrparam) */
-#define BRDGGFD 17 /* get forward delay (ifbrparam) */
-#define BRDGSFD 18 /* set forward delay (ifbrparam) */
-#define BRDGGMA 19 /* get max age (ifbrparam) */
-#define BRDGSMA 20 /* set max age (ifbrparam) */
-#define BRDGSIFPRIO 21 /* set if priority (ifbreq) */
-#define BRDGSIFCOST 22 /* set if path cost (ifbreq) */
-#define BRDGGFILT 23 /* get filter flags (ifbrparam) */
-#define BRDGSFILT 24 /* set filter flags (ifbrparam) */
-#define BRDGPURGE 25 /* purge address cache for a particular interface (ifbreq) */
-
-/*
- * Generic bridge control request.
- */
-#pragma pack(4)
-
-struct ifbreq {
- char ifbr_ifsname[IFNAMSIZ]; /* member if name */
- uint32_t ifbr_ifsflags; /* member if flags */
- uint16_t ifbr_portno; /* member if port number */
- uint8_t ifbr_state; /* member if STP state */
- uint8_t ifbr_priority; /* member if STP priority */
- uint8_t ifbr_path_cost; /* member if STP cost */
-};
-
-#pragma pack()
-
-/* BRDGGIFFLAGS, BRDGSIFFLAGS */
-#define IFBIF_LEARNING 0x01 /* if can learn */
-#define IFBIF_DISCOVER 0x02 /* if sends packets w/ unknown dest. */
-#define IFBIF_STP 0x04 /* if participates in spanning tree */
-/* APPLE MODIFICATION <cbz@apple.com>
- add the following bits for ProxySTA:
- IFBIF_PROXYSTA, IFBIF_PROXYSTA_DISCOVER
- add the following bits for Guest Network
- IFBIF_NO_FORWARDING
- */
-#define IFBIF_PROXYSTA 0x08 /* if interface is a proxy sta */
-#define IFBIF_PROXYSTA_DISCOVER 0x10 /* if interface is used to discover proxy sta candidates */
-#define IFBIF_NO_FORWARDING 0x20 /* if interface cannot forward traffic from one interface to the next */
-
-/* APPLE MODIFICATION <cbz@apple.com>
- add the following bits for ProxySTA:
- PROXYSTA, PROXYSTA_DISCOVER
- add the following bits for Guest Network
- NO_FORWARDING
- this was...
-
- #define IFBIFBITS "\020\1LEARNING\2DISCOVER\3STP"
- */
-#define IFBIFBITS "\020\1LEARNING\2DISCOVER\3STP\4PROXYSTA\5PROXYSTA_DISCOVER\6NO_FORWARDING"
-
-/* BRDGFLUSH */
-#define IFBF_FLUSHDYN 0x00 /* flush learned addresses only */
-#define IFBF_FLUSHALL 0x01 /* flush all addresses */
-
-/* BRDGSFILT */
-#define IFBF_FILT_USEIPF 0x00000001 /* run pfil hooks on the bridge
-interface */
-#define IFBF_FILT_MEMBER 0x00000002 /* run pfil hooks on the member
-interfaces */
-#define IFBF_FILT_ONLYIP 0x00000004 /* only pass IP[46] packets when
-pfil is enabled */
-#define IFBF_FILT_MASK 0x00000007 /* mask of valid values */
-
-
-/* APPLE MODIFICATION <jhw@apple.com>: Default is to pass non-IP packets. */
-#define IFBF_FILT_DEFAULT ( IFBF_FILT_USEIPF | IFBF_FILT_MEMBER )
-#if 0
-#define IFBF_FILT_DEFAULT (IFBF_FILT_USEIPF | \
-IFBF_FILT_MEMBER | \
-IFBF_FILT_ONLYIP)
-#endif
-
-/* STP port states */
-#define BSTP_IFSTATE_DISABLED 0
-#define BSTP_IFSTATE_LISTENING 1
-#define BSTP_IFSTATE_LEARNING 2
-#define BSTP_IFSTATE_FORWARDING 3
-#define BSTP_IFSTATE_BLOCKING 4
-
-/*
- * Interface list structure.
- */
-
-#pragma pack(4)
-
-struct ifbifconf {
- uint32_t ifbic_len; /* buffer size */
- union {
- caddr_t ifbicu_buf;
- struct ifbreq *ifbicu_req;
- } ifbic_ifbicu;
-#define ifbic_buf ifbic_ifbicu.ifbicu_buf
-#define ifbic_req ifbic_ifbicu.ifbicu_req
-};
-
-#ifdef KERNEL_PRIVATE
-struct ifbifconf32 {
- uint32_t ifbic_len; /* buffer size */
- union {
- user32_addr_t ifbicu_buf;
- user32_addr_t ifbicu_req;
- } ifbic_ifbicu;
-};
-
-struct ifbifconf64 {
- uint32_t ifbic_len; /* buffer size */
- union {
- user64_addr_t ifbicu_buf;
- user64_addr_t ifbicu_req;
- } ifbic_ifbicu;
-};
-#endif /* KERNEL_PRIVATE */
-
-#pragma pack()
-
-/*
- * Bridge address request.
- */
-
-#pragma pack(4)
-
-struct ifbareq {
- char ifba_ifsname[IFNAMSIZ]; /* member if name */
- unsigned long ifba_expire; /* address expire time */
- uint8_t ifba_flags; /* address flags */
- uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */
-};
-
-#ifdef KERNEL_PRIVATE
-struct ifbareq32 {
- char ifba_ifsname[IFNAMSIZ]; /* member if name */
- uint32_t ifba_expire; /* address expire time */
- uint8_t ifba_flags; /* address flags */
- uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */
-};
-
-struct ifbareq64 {
- char ifba_ifsname[IFNAMSIZ]; /* member if name */
- uint64_t ifba_expire; /* address expire time */
- uint8_t ifba_flags; /* address flags */
- uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */
-};
-#endif /* KERNEL_PRIVATE */
-
-#pragma pack()
-
-#define IFBAF_TYPEMASK 0x03 /* address type mask */
-#define IFBAF_DYNAMIC 0x00 /* dynamically learned address */
-#define IFBAF_STATIC 0x01 /* static address */
-
-#define IFBAFBITS "\020\1STATIC"
-
-/*
- * Address list structure.
- */
-
-#pragma pack(4)
-
-struct ifbaconf {
- uint32_t ifbac_len; /* buffer size */
- union {
- caddr_t ifbacu_buf;
- struct ifbareq *ifbacu_req;
- } ifbac_ifbacu;
-#define ifbac_buf ifbac_ifbacu.ifbacu_buf
-#define ifbac_req ifbac_ifbacu.ifbacu_req
-};
-
-#ifdef KERNEL_PRIVATE
-struct ifbaconf32 {
- uint32_t ifbac_len; /* buffer size */
- union {
- user32_addr_t ifbacu_buf;
- user32_addr_t ifbacu_req;
- } ifbac_ifbacu;
-};
-
-struct ifbaconf64 {
- uint32_t ifbac_len; /* buffer size */
- union {
- user64_addr_t ifbacu_buf;
- user64_addr_t ifbacu_req;
- } ifbac_ifbacu;
-};
-#endif /* KERNEL_PRIVATE */
-
-#pragma pack()
-
-/*
- * Bridge parameter structure.
- */
-
-#pragma pack(4)
-
-struct ifbrparam {
- union {
- uint32_t ifbrpu_int32;
- uint16_t ifbrpu_int16;
- uint8_t ifbrpu_int8;
- } ifbrp_ifbrpu;
-};
-
-#pragma pack()
-
-#define ifbrp_csize ifbrp_ifbrpu.ifbrpu_int32 /* cache size */
-#define ifbrp_ctime ifbrp_ifbrpu.ifbrpu_int32 /* cache time (sec) */
-#define ifbrp_prio ifbrp_ifbrpu.ifbrpu_int16 /* bridge priority */
-#define ifbrp_hellotime ifbrp_ifbrpu.ifbrpu_int8 /* hello time (sec) */
-#define ifbrp_fwddelay ifbrp_ifbrpu.ifbrpu_int8 /* fwd time (sec) */
-#define ifbrp_maxage ifbrp_ifbrpu.ifbrpu_int8 /* max age (sec) */
-#define ifbrp_filter ifbrp_ifbrpu.ifbrpu_int32 /* filtering flags */
-
-#ifdef KERNEL
-/*
- * Timekeeping structure used in spanning tree code.
- */
-struct bridge_timer {
- uint16_t active;
- uint16_t value;
-};
-
-struct bstp_config_unit {
- uint64_t cu_rootid;
- uint64_t cu_bridge_id;
- uint32_t cu_root_path_cost;
- uint16_t cu_message_age;
- uint16_t cu_max_age;
- uint16_t cu_hello_time;
- uint16_t cu_forward_delay;
- uint16_t cu_port_id;
- uint8_t cu_message_type;
- uint8_t cu_topology_change_acknowledgment;
- uint8_t cu_topology_change;
-};
-
-struct bstp_tcn_unit {
- uint8_t tu_message_type;
-};
-
-struct bridge_softc;
-
-/*
- * Bridge interface list entry.
- * (VL) bridge_ifmember would be a better name, more descriptive
- */
-struct bridge_iflist {
- LIST_ENTRY(bridge_iflist) bif_next;
- uint64_t bif_designated_root;
- uint64_t bif_designated_bridge;
- uint32_t bif_path_cost;
- uint32_t bif_designated_cost;
- struct bridge_timer bif_hold_timer;
- struct bridge_timer bif_message_age_timer;
- struct bridge_timer bif_forward_delay_timer;
- uint16_t bif_port_id;
- uint16_t bif_designated_port;
- struct bstp_config_unit bif_config_bpdu;
- uint8_t bif_state;
- uint8_t bif_topology_change_acknowledge;
- uint8_t bif_config_pending;
- uint8_t bif_change_detection_enabled;
- uint8_t bif_priority;
- struct ifnet *bif_ifp; /* member if */
- uint32_t bif_flags; /* member if flags */
- int bif_mutecap; /* member muted caps */
- interface_filter_t bif_iff_ref;
- struct bridge_softc *bif_sc;
-};
-
-/*
- * Bridge route node.
- */
-struct bridge_rtnode {
- LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
- LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
- struct ifnet *brt_ifp; /* destination if */
- unsigned long brt_expire; /* expiration time */
- uint8_t brt_flags; /* address flags */
- uint8_t brt_addr[ETHER_ADDR_LEN];
- /* APPLE MODIFICATION <cbz@apple.com> - add the following elements:
- brt_flags_ext, brt_ifp_proxysta */
-#define IFBAF_EXT_PROXYSTA 0x01
- uint8_t brt_flags_ext; /* extended flags */
- struct ifnet *brt_ifp_proxysta; /* proxy sta if */
-};
-
-
-/*
- * Software state for each bridge.
- */
-struct bridge_softc {
- LIST_ENTRY(bridge_softc) sc_list;
- struct ifnet *sc_if;
- uint64_t sc_designated_root;
- uint64_t sc_bridge_id;
- struct bridge_iflist *sc_root_port;
- uint32_t sc_root_path_cost;
- uint16_t sc_max_age;
- uint16_t sc_hello_time;
- uint16_t sc_forward_delay;
- uint16_t sc_bridge_max_age;
- uint16_t sc_bridge_hello_time;
- uint16_t sc_bridge_forward_delay;
- uint16_t sc_topology_change_time;
- uint16_t sc_hold_time;
- uint16_t sc_bridge_priority;
- uint8_t sc_topology_change_detected;
- uint8_t sc_topology_change;
- struct bridge_timer sc_hello_timer;
- struct bridge_timer sc_topology_change_timer;
- struct bridge_timer sc_tcn_timer;
- uint32_t sc_brtmax; /* max # of addresses */
- uint32_t sc_brtcnt; /* cur. # of addresses */
- /* APPLE MODIFICATION <cbz@apple.com> - add the following elements:
- sc_brtmax_proxysta */
- uint32_t sc_brtmax_proxysta; /* max # of proxy sta addresses */
- uint32_t sc_brttimeout; /* rt timeout in seconds */
- LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
- LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
- LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
- uint32_t sc_rthash_key; /* key for hash */
- uint32_t sc_filter_flags; /* ipf and flags */
-
- //(VL)
- char sc_if_xname[IFNAMSIZ];
- bpf_packet_func sc_bpf_input;
- bpf_packet_func sc_bpf_output;
- u_int32_t sc_flags;
- lck_mtx_t *sc_mtx;
-};
-
-#define SCF_DETACHING 0x1
-
-extern const uint8_t bstp_etheraddr[];
-
-int bridgeattach(int);
-void bridge_enqueue(struct bridge_softc *, struct ifnet *, struct mbuf *);
-void bridge_rtdelete(struct bridge_softc *, struct ifnet *, int);
-
-void bstp_initialization(struct bridge_softc *);
-void bstp_stop(struct bridge_softc *);
-struct mbuf *bstp_input(struct bridge_softc *, struct ifnet *, struct mbuf *);
-
-
-#endif /* KERNEL */
-#endif /* PRIVATE */
-#endif /* !_NET_IF_BRIDGEVAR_H_ */
-
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
static int make_ifmibdata(struct ifnet *, int *, struct sysctl_req *);
-
int
make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req)
{
break;
case IFDATA_GENERAL:
-
bzero(&ifmd, sizeof(ifmd));
- snprintf(ifmd.ifmd_name, sizeof(ifmd.ifmd_name), "%s%d",
- ifp->if_name, ifp->if_unit);
-
+ /*
+ * Make sure the interface is in use
+ */
+ if (ifp->if_refcnt > 0) {
+ snprintf(ifmd.ifmd_name, sizeof(ifmd.ifmd_name), "%s%d",
+ ifp->if_name, ifp->if_unit);
+
#define COPY(fld) ifmd.ifmd_##fld = ifp->if_##fld
- COPY(pcount);
- COPY(flags);
- if_data_internal_to_if_data64(ifp, &ifp->if_data, &ifmd.ifmd_data);
+ COPY(pcount);
+ COPY(flags);
+ if_data_internal_to_if_data64(ifp, &ifp->if_data, &ifmd.ifmd_data);
#undef COPY
- ifmd.ifmd_snd_len = ifp->if_snd.ifq_len;
- ifmd.ifmd_snd_maxlen = ifp->if_snd.ifq_maxlen;
- ifmd.ifmd_snd_drops = ifp->if_snd.ifq_drops;
-
+ ifmd.ifmd_snd_len = ifp->if_snd.ifq_len;
+ ifmd.ifmd_snd_maxlen = ifp->if_snd.ifq_maxlen;
+ ifmd.ifmd_snd_drops = ifp->if_snd.ifq_drops;
+#if PKT_PRIORITY
+ /* stuff these into unused fields for now */
+ ifmd.ifmd_filler[0] = ifp->if_obgpackets;
+ ifmd.ifmd_filler[1] = ifp->if_obgbytes;
+#endif /* PKT_PRIORITY */
+ }
error = SYSCTL_OUT(req, &ifmd, sizeof ifmd);
if (error || !req->newptr)
break;
int
sysctl_ifdata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */
{
+#pragma unused(oidp)
int *name = (int *)arg1;
int error = 0;
u_int namelen = arg2;
return EINVAL;
ifnet_head_lock_shared();
if (name[0] <= 0 || name[0] > if_index ||
- (ifp = ifindex2ifnet[name[0]]) == NULL) {
+ (ifp = ifindex2ifnet[name[0]]) == NULL ||
+ ifp->if_refcnt == 0) {
ifnet_head_done();
return ENOENT;
}
int
sysctl_ifalldata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */
{
+#pragma unused(oidp)
int *name = (int *)arg1;
int error = 0;
u_int namelen = arg2;
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/ioctl.h>
#include <net/if.h>
+#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/bpf.h>
#define DPRINTF(x)
#endif
-static int pflog_create_dev(void);
+static int pflog_clone_create(struct if_clone *, u_int32_t, void *);
+static int pflog_clone_destroy(struct ifnet *);
static errno_t pflogoutput(struct ifnet *, struct mbuf *);
static errno_t pflogioctl(struct ifnet *, unsigned long, void *);
static errno_t pflogdemux(struct ifnet *, struct mbuf *, char *,
static errno_t pflogaddproto(struct ifnet *, protocol_family_t,
const struct ifnet_demux_desc *, u_int32_t);
static errno_t pflogdelproto(struct ifnet *, protocol_family_t);
+static void pflogfree(struct ifnet *);
static LIST_HEAD(, pflog_softc) pflogif_list;
+static struct if_clone pflog_cloner =
+ IF_CLONE_INITIALIZER(PFLOGNAME, pflog_clone_create, pflog_clone_destroy,
+ 0, (PFLOGIFS_MAX - 1));
struct ifnet *pflogifs[PFLOGIFS_MAX]; /* for fast access */
-static int npflog;
-static lck_attr_t *pflog_lock_attr;
-static lck_grp_t *pflog_lock_grp;
-static lck_grp_attr_t *pflog_lock_grp_attr;
-static lck_mtx_t *pflog_lock;
void
pfloginit(void)
{
int i;
- if (pflog_lock != NULL)
- return;
-
- pflog_lock_grp_attr = lck_grp_attr_alloc_init();
- pflog_lock_grp = lck_grp_alloc_init("pflog", pflog_lock_grp_attr);
- pflog_lock_attr = lck_attr_alloc_init();
- pflog_lock = lck_mtx_alloc_init(pflog_lock_grp, pflog_lock_attr);
- if (pflog_lock == NULL) {
- panic("%s: unable to allocate lock", __func__);
+ if (pf_perim_lock == NULL || pf_lock == NULL) {
+ panic("%s: called before PF is initialized", __func__);
/* NOTREACHED */
}
LIST_INIT(&pflogif_list);
for (i = 0; i < PFLOGIFS_MAX; i++)
pflogifs[i] = NULL;
- pflog_create_dev();
+ (void) if_clone_attach(&pflog_cloner);
}
static int
-pflog_create_dev(void)
+pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params)
{
struct pflog_softc *pflogif;
struct ifnet_init_params pf_init;
int error = 0;
- lck_mtx_lock(pflog_lock);
- if (npflog >= PFLOGIFS_MAX) {
- error = EINVAL;
- goto done;
+ if (unit >= PFLOGIFS_MAX) {
+ /* Either the interface cloner or our initializer is broken */
+ panic("%s: unit (%d) exceeds max (%d)", __func__, unit,
+ PFLOGIFS_MAX);
+ /* NOTREACHED */
}
if ((pflogif = _MALLOC(sizeof (*pflogif),
}
bzero(&pf_init, sizeof (pf_init));
- pf_init.name = PFLOGNAME;
- pf_init.unit = npflog;
+ pf_init.name = ifc->ifc_name;
+ pf_init.unit = unit;
pf_init.type = IFT_PFLOG;
pf_init.family = IFNET_FAMILY_LOOPBACK;
pf_init.output = pflogoutput;
pf_init.del_proto = pflogdelproto;
pf_init.softc = pflogif;
pf_init.ioctl = pflogioctl;
+ pf_init.detach = pflogfree;
bzero(pflogif, sizeof (*pflogif));
- pflogif->sc_unit = npflog;
+ pflogif->sc_unit = unit;
error = ifnet_allocate(&pf_init, &pflogif->sc_if);
if (error != 0) {
bpfattach(pflogif->sc_if, DLT_PFLOG, PFLOG_HDRLEN);
#endif
+ lck_rw_lock_shared(pf_perim_lock);
+ lck_mtx_lock(pf_lock);
LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
- pflogifs[npflog] = pflogif->sc_if;
- ++npflog;
-done:
- lck_mtx_unlock(pflog_lock);
+ pflogifs[unit] = pflogif->sc_if;
+ lck_mtx_unlock(pf_lock);
+ lck_rw_done(pf_perim_lock);
+done:
return (error);
}
-#if 0
-int
-pflog_destroy_dev(struct ifnet *ifp)
+static int
+pflog_clone_destroy(struct ifnet *ifp)
{
- struct pflog_softc *pflogif = ifp->if_softc;
+ struct pflog_softc *pflogif = ifp->if_softc;
- lck_mtx_lock(pflog_lock);
+ lck_rw_lock_shared(pf_perim_lock);
+ lck_mtx_lock(pf_lock);
pflogifs[pflogif->sc_unit] = NULL;
LIST_REMOVE(pflogif, sc_list);
- lck_mtx_unlock(pflog_lock);
+ lck_mtx_unlock(pf_lock);
+ lck_rw_done(pf_perim_lock);
-#if NBPFILTER > 0
- bpfdetach(ifp);
-#endif
- if_detach(ifp);
- _FREE(pflogif, M_DEVBUF);
- return (0);
+ /* bpfdetach() is taken care of as part of interface detach */
+ (void) ifnet_detach(ifp);
+
+ return 0;
}
-#endif
static errno_t
pflogoutput(struct ifnet *ifp, struct mbuf *m)
return (0);
}
+static void
+pflogfree(struct ifnet *ifp)
+{
+ _FREE(ifp->if_softc, M_DEVBUF);
+ ifp->if_softc = NULL;
+ (void) ifnet_release(ifp);
+}
+
int
pflog_packet(struct pfi_kif *kif, struct mbuf *m, sa_family_t af, u_int8_t dir,
u_int8_t reason, struct pf_rule *rm, struct pf_rule *am,
struct ifnet *ifn;
struct pfloghdr hdr;
+ lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
+
if (kif == NULL || m == NULL || rm == NULL || pd == NULL)
return (-1);
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
extern "C" {
#endif
-#define PFLOGIFS_MAX 1
+#define PFLOGIFS_MAX 16
#if KERNEL_PRIVATE
struct pflog_softc {
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/* Control block allocated for each kernel control connection */
struct utun_pcb {
- kern_ctl_ref ctlref;
- u_int32_t unit;
- ifnet_t ifp;
- u_int32_t flags;
+ kern_ctl_ref utun_ctlref;
+ ifnet_t utun_ifp;
+ u_int32_t utun_unit;
+ u_int32_t utun_flags;
+ int utun_ext_ifdata_stats;
};
static kern_ctl_ref utun_kctlref;
strncpy(kern_ctl.ctl_name, UTUN_CONTROL_NAME, sizeof(kern_ctl.ctl_name));
kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0;
kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; /* Require root */
+ kern_ctl.ctl_sendsize = 64 * 1024;
+ kern_ctl.ctl_recvsize = 64 * 1024;
kern_ctl.ctl_connect = utun_ctl_connect;
kern_ctl.ctl_disconnect = utun_ctl_disconnect;
kern_ctl.ctl_send = utun_ctl_send;
struct ifnet_init_params utun_init;
struct utun_pcb *pcb;
errno_t result;
+ struct ifnet_stats_param stats;
/* kernel control allocates, interface frees */
pcb = utun_alloc(sizeof(*pcb));
/* Setup the protocol control block */
bzero(pcb, sizeof(*pcb));
*unitinfo = pcb;
- pcb->ctlref = kctlref;
- pcb->unit = sac->sc_unit;
+ pcb->utun_ctlref = kctlref;
+ pcb->utun_unit = sac->sc_unit;
- printf("utun_ctl_connect: creating interface utun%d\n", pcb->unit - 1);
+ printf("utun_ctl_connect: creating interface utun%d\n", pcb->utun_unit - 1);
/* Create the interface */
bzero(&utun_init, sizeof(utun_init));
utun_init.name = "utun";
- utun_init.unit = pcb->unit - 1;
+ utun_init.unit = pcb->utun_unit - 1;
utun_init.family = utun_family;
utun_init.type = IFT_OTHER;
utun_init.output = utun_output;
utun_init.ioctl = utun_ioctl;
utun_init.detach = utun_detached;
- result = ifnet_allocate(&utun_init, &pcb->ifp);
+ result = ifnet_allocate(&utun_init, &pcb->utun_ifp);
if (result != 0) {
printf("utun_ctl_connect - ifnet_allocate failed: %d\n", result);
utun_free(pcb);
OSIncrementAtomic(&utun_ifcount);
/* Set flags and additional information. */
- ifnet_set_mtu(pcb->ifp, 1500);
- ifnet_set_flags(pcb->ifp, IFF_UP | IFF_MULTICAST | IFF_POINTOPOINT, 0xffff);
+ ifnet_set_mtu(pcb->utun_ifp, 1500);
+ ifnet_set_flags(pcb->utun_ifp, IFF_UP | IFF_MULTICAST | IFF_POINTOPOINT, 0xffff);
/* The interface must generate its own IPv6 LinkLocal address,
* if possible following the recommendation of RFC2472 to the 64bit interface ID
*/
- ifnet_set_eflags(pcb->ifp, IFEF_NOAUTOIPV6LL, IFEF_NOAUTOIPV6LL);
+ ifnet_set_eflags(pcb->utun_ifp, IFEF_NOAUTOIPV6LL, IFEF_NOAUTOIPV6LL);
+	/* Reset the stats, as the interface may have been recycled */
+ bzero(&stats, sizeof(struct ifnet_stats_param));
+ ifnet_set_stat(pcb->utun_ifp, &stats);
+
/* Attach the interface */
- result = ifnet_attach(pcb->ifp, NULL);
+ result = ifnet_attach(pcb->utun_ifp, NULL);
if (result != 0) {
printf("utun_ctl_connect - ifnet_allocate failed: %d\n", result);
- ifnet_release(pcb->ifp);
+ ifnet_release(pcb->utun_ifp);
utun_free(pcb);
}
/* Attach to bpf */
if (result == 0)
- bpfattach(pcb->ifp, DLT_NULL, 4);
+ bpfattach(pcb->utun_ifp, DLT_NULL, 4);
+
+	/* The interface's resources are allocated; mark it as running */
+ if (result == 0)
+ ifnet_set_flags(pcb->utun_ifp, IFF_RUNNING, IFF_RUNNING);
return result;
}
void *unitinfo)
{
struct utun_pcb *pcb = unitinfo;
- ifnet_t ifp = pcb->ifp;
+ ifnet_t ifp = pcb->utun_ifp;
errno_t result = 0;
- pcb->ctlref = NULL;
- pcb->unit = 0;
+ pcb->utun_ctlref = NULL;
+ pcb->utun_unit = 0;
/*
* We want to do everything in our power to ensure that the interface
__unused int flags)
{
struct utun_pcb *pcb = unitinfo;
- struct ifnet_stat_increment_param incs;
errno_t result;
- mbuf_pkthdr_setrcvif(m, pcb->ifp);
+ mbuf_pkthdr_setrcvif(m, pcb->utun_ifp);
- bpf_tap_in(pcb->ifp, DLT_NULL, m, 0, 0);
+ bpf_tap_in(pcb->utun_ifp, DLT_NULL, m, 0, 0);
- if (pcb->flags & UTUN_FLAGS_NO_INPUT) {
+ if (pcb->utun_flags & UTUN_FLAGS_NO_INPUT) {
/* flush data */
mbuf_freem(m);
return 0;
}
- bzero(&incs, sizeof(incs));
- incs.packets_in = 1;
- incs.bytes_in = mbuf_pkthdr_len(m);
- result = ifnet_input(pcb->ifp, m, &incs);
+ if (!pcb->utun_ext_ifdata_stats) {
+ struct ifnet_stat_increment_param incs;
+
+ bzero(&incs, sizeof(incs));
+ incs.packets_in = 1;
+ incs.bytes_in = mbuf_pkthdr_len(m);
+ result = ifnet_input(pcb->utun_ifp, m, &incs);
+ } else {
+ result = ifnet_input(pcb->utun_ifp, m, NULL);
+ }
if (result != 0) {
- ifnet_stat_increment_in(pcb->ifp, 0, 0, 1);
+ ifnet_stat_increment_in(pcb->utun_ifp, 0, 0, 1);
+
printf("utun_ctl_send - ifnet_input failed: %d\n", result);
mbuf_freem(m);
}
/* check for privileges for privileged options */
switch (opt) {
case UTUN_OPT_FLAGS:
+ case UTUN_OPT_EXT_IFDATA_STATS:
if (kauth_cred_issuser(kauth_cred_get()) == 0) {
return EPERM;
}
if (len != sizeof(u_int32_t))
result = EMSGSIZE;
else
- pcb->flags = *(u_int32_t *)data;
+ pcb->utun_flags = *(u_int32_t *)data;
+ break;
+
+ case UTUN_OPT_EXT_IFDATA_STATS:
+ if (len != sizeof(int)) {
+ result = EMSGSIZE;
+ break;
+ }
+ pcb->utun_ext_ifdata_stats = (*(int *)data) ? 1 : 0;
+ break;
+
+ case UTUN_OPT_INC_IFDATA_STATS_IN:
+ case UTUN_OPT_INC_IFDATA_STATS_OUT: {
+ struct utun_stats_param *utsp = (struct utun_stats_param *)data;
+
+ if (utsp == NULL || len < sizeof(struct utun_stats_param)) {
+ result = EINVAL;
+ break;
+ }
+ if (!pcb->utun_ext_ifdata_stats) {
+ result = EINVAL;
+ break;
+ }
+ if (opt == UTUN_OPT_INC_IFDATA_STATS_IN)
+ ifnet_stat_increment_in(pcb->utun_ifp, utsp->utsp_packets,
+ utsp->utsp_bytes, utsp->utsp_errors);
+ else
+ ifnet_stat_increment_out(pcb->utun_ifp, utsp->utsp_packets,
+ utsp->utsp_bytes, utsp->utsp_errors);
break;
+ }
+
default:
result = ENOPROTOOPT;
break;
if (*len != sizeof(u_int32_t))
result = EMSGSIZE;
else
- *(u_int32_t *)data = pcb->flags;
+ *(u_int32_t *)data = pcb->utun_flags;
break;
+
+ case UTUN_OPT_EXT_IFDATA_STATS:
+ if (*len != sizeof(int))
+ result = EMSGSIZE;
+ else
+ *(int *)data = (pcb->utun_ext_ifdata_stats) ? 1 : 0;
+ break;
+
case UTUN_OPT_IFNAME:
- *len = snprintf(data, *len, "%s%d", ifnet_name(pcb->ifp), ifnet_unit(pcb->ifp)) + 1;
+ *len = snprintf(data, *len, "%s%d", ifnet_name(pcb->utun_ifp), ifnet_unit(pcb->utun_ifp)) + 1;
break;
+
default:
result = ENOPROTOOPT;
break;
struct utun_pcb *pcb = ifnet_softc(interface);
errno_t result;
- bpf_tap_out(pcb->ifp, DLT_NULL, data, 0, 0);
+ bpf_tap_out(pcb->utun_ifp, DLT_NULL, data, 0, 0);
- if (pcb->flags & UTUN_FLAGS_NO_OUTPUT) {
+ if (pcb->utun_flags & UTUN_FLAGS_NO_OUTPUT) {
/* flush data */
mbuf_freem(data);
return 0;
}
- if (pcb->ctlref) {
+ if (pcb->utun_ctlref) {
int length = mbuf_pkthdr_len(data);
- result = ctl_enqueuembuf(pcb->ctlref, pcb->unit, data, CTL_DATA_EOR);
+ result = ctl_enqueuembuf(pcb->utun_ctlref, pcb->utun_unit, data, CTL_DATA_EOR);
if (result != 0) {
mbuf_freem(data);
printf("utun_output - ctl_enqueuembuf failed: %d\n", result);
+
ifnet_stat_increment_out(interface, 0, 0, 1);
}
else {
- ifnet_stat_increment_out(interface, 1, length, 0);
+ if (!pcb->utun_ext_ifdata_stats)
+ ifnet_stat_increment_out(interface, 1, length, 0);
}
}
else
static errno_t
utun_framer(
- __unused ifnet_t interface,
+ __unused ifnet_t interface,
mbuf_t *packet,
__unused const struct sockaddr *dest,
__unused const char *desk_linkaddr,
const char *frame_type)
{
-
if (mbuf_prepend(packet, sizeof(protocol_family_t), MBUF_DONTWAIT) != 0) {
printf("utun_framer - ifnet_output prepend failed\n");
+
ifnet_stat_increment_out(interface, 0, 0, 1);
+
// just return, because the buffer was freed in mbuf_prepend
return EJUSTRETURN;
}
static errno_t
utun_ioctl(
- __unused ifnet_t interface,
- __unused u_long command,
+ ifnet_t interface,
+ u_long command,
void *data)
{
errno_t result = 0;
case SIOCSIFMTU:
ifnet_set_mtu(interface, ((struct ifreq*)data)->ifr_mtu);
break;
-
+
+ case SIOCSIFFLAGS:
+ /* ifioctl() takes care of it */
+ break;
+
case SIOCSIFADDR:
case SIOCAIFADDR:
/* This will be called for called for IPv6 Address additions */
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#endif
-#ifdef PRIVATE
-
+/*
+ * Name registered by the utun kernel control
+ */
#define UTUN_CONTROL_NAME "com.apple.net.utun_control"
-#define UTUN_OPT_FLAGS 1
-#define UTUN_OPT_IFNAME 2
-enum {
- UTUN_FLAGS_NO_OUTPUT = 0x1,
- UTUN_FLAGS_NO_INPUT = 0x2,
+/*
+ * Socket option names to manage utun
+ */
+#define UTUN_OPT_FLAGS 1
+#define UTUN_OPT_IFNAME 2
+#define UTUN_OPT_EXT_IFDATA_STATS 3 /* get|set (type int) */
+#define UTUN_OPT_INC_IFDATA_STATS_IN 4 /* set to increment stat counters (type struct utun_stats_param) */
+#define UTUN_OPT_INC_IFDATA_STATS_OUT 5 /* set to increment stat counters (type struct utun_stats_param) */
+
+/*
+ * Flags for UTUN_OPT_FLAGS
+ */
+#define UTUN_FLAGS_NO_OUTPUT 0x0001
+#define UTUN_FLAGS_NO_INPUT 0x0002
+
+/*
+ * utun stats parameter structure
+ */
+struct utun_stats_param {
+ u_int64_t utsp_packets;
+ u_int64_t utsp_bytes;
+ u_int64_t utsp_errors;
};
#endif
-#endif
/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
u_int64_t ifi_omcasts; /* packets sent via multicast */
u_int64_t ifi_iqdrops; /* dropped on input, this interface */
u_int64_t ifi_noproto; /* destined for unsupported protocol */
+#if PKT_PRIORITY
+ u_int32_t ifi_obgpackets; /* bg packets sent on interface */
+ u_int32_t ifi_obgbytes; /* total number of bg octets sent */
+#endif /* PKT_PRIORITY */
u_int32_t ifi_recvtiming; /* usec spent receiving when timing */
u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */
#define IF_LASTCHANGEUPTIME 1 /* lastchange: 1-uptime 0-calendar time */
#define if_omcasts if_data.ifi_omcasts
#define if_iqdrops if_data.ifi_iqdrops
#define if_noproto if_data.ifi_noproto
+#if PKT_PRIORITY
+#define if_obgpackets if_data.ifi_obgpackets
+#define if_obgbytes if_data.ifi_obgbytes
+#endif /* PKT_PRIORITY */
#define if_lastchange if_data.ifi_lastchange
#define if_recvquota if_data.ifi_recvquota
#define if_xmitquota if_data.ifi_xmitquota
#endif
struct route if_fwd_route; /* cached IPv4 forwarding route */
void *if_bridge; /* bridge glue */
+#if IFNET_ROUTE_REFCNT
+ u_int32_t if_want_aggressive_drain;
+ u_int32_t if_idle_flags; /* idle flags */
+ u_int32_t if_route_refcnt; /* idle: route ref count */
+#endif /* IFNET_ROUTE_REFCNT */
};
#ifndef __APPLE__
LIST_ENTRY(if_clone) ifc_list; /* on list of cloners */
const char *ifc_name; /* name of device, e.g. `vlan' */
size_t ifc_namelen; /* length of name */
- int ifc_minifs; /* minimum number of interfaces */
- int ifc_maxunit; /* maximum unit number */
+ u_int32_t ifc_minifs; /* minimum number of interfaces */
+ u_int32_t ifc_maxunit; /* maximum unit number */
unsigned char *ifc_units; /* bitmap to handle units */
- int ifc_bmlen; /* bitmap length */
+ u_int32_t ifc_bmlen; /* bitmap length */
- int (*ifc_create)(struct if_clone *, int);
- void (*ifc_destroy)(struct ifnet *);
+ int (*ifc_create)(struct if_clone *, u_int32_t, void *);
+ int (*ifc_destroy)(struct ifnet *);
};
#define IF_CLONE_INITIALIZER(name, create, destroy, minifs, maxunit) \
{ { NULL, NULL }, name, sizeof(name) - 1, minifs, maxunit, NULL, 0, create, destroy }
+#define M_CLONE M_IFADDR
+
/*
* Bit values in if_ipending
*/
int if_clone_attach(struct if_clone *);
void if_clone_detach(struct if_clone *);
+struct if_clone *
+ if_clone_lookup(const char *, u_int32_t *);
void ifnet_lock_assert(struct ifnet *ifp, int what);
void ifnet_lock_shared(struct ifnet *ifp);
/*
- * Copyright (c) 2003-2009 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2010 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define M_VLAN M_DEVBUF
-static int vlan_clone_create(struct if_clone *, int);
-static void vlan_clone_destroy(struct ifnet *);
+static int vlan_clone_create(struct if_clone *, u_int32_t, void *);
+static int vlan_clone_destroy(struct ifnet *);
static int vlan_input(ifnet_t ifp, protocol_family_t protocol,
mbuf_t m, char *frame_header);
static int vlan_output(struct ifnet *ifp, struct mbuf *m);
}
static int
-vlan_clone_create(struct if_clone *ifc, int unit)
+vlan_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params)
{
int error;
ifvlan_ref ifv;
return;
}
-static void
+static int
vlan_clone_destroy(struct ifnet *ifp)
{
ifvlan_ref ifv;
ifv = ifnet_softc(ifp);
if (ifv == NULL || ifnet_type(ifp) != IFT_L2VLAN) {
vlan_unlock();
- return;
+ return 0;
}
if (ifvlan_flags_detaching(ifv)) {
vlan_unlock();
- return;
+ return 0;
}
vlan_remove(ifv);
vlan_unlock();
vlan_if_detach(ifp);
- return;
+
+ return 0;
}
static int
/*
- * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
return interface == NULL ? 0 : interface->if_eflags;
}
+errno_t
+ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
+{
+#if IFNET_ROUTE_REFCNT
+ int lock, before, after;
+
+ if (ifp == NULL)
+ return (EINVAL);
+
+ lck_mtx_lock(rnh_lock);
+
+ lock = (ifp->if_lock != NULL);
+ if (lock)
+ ifnet_lock_exclusive(ifp);
+
+ before = ifp->if_idle_flags;
+ ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
+ after = ifp->if_idle_flags;
+
+ if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
+ ifp->if_want_aggressive_drain != 0) {
+ ifp->if_want_aggressive_drain = 0;
+ if (ifnet_aggressive_drainers == 0)
+ panic("%s: ifp=%p negative aggdrain!", __func__, ifp);
+ if (--ifnet_aggressive_drainers == 0)
+ rt_aggdrain(0);
+ } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
+ ifp->if_want_aggressive_drain++;
+ if (++ifnet_aggressive_drainers == 0)
+ panic("%s: ifp=%p wraparound aggdrain!", __func__, ifp);
+ else if (ifnet_aggressive_drainers == 1)
+ rt_aggdrain(1);
+ }
+
+ if (lock)
+ ifnet_lock_done(ifp);
+
+ lck_mtx_unlock(rnh_lock);
+
+ return (0);
+#else
+#pragma unused(ifp, new_flags, mask)
+ return (ENOTSUP);
+#endif /* IFNET_ROUTE_REFCNT */
+}
+
+u_int32_t
+ifnet_idle_flags(ifnet_t ifp)
+{
+#if IFNET_ROUTE_REFCNT
+ return ((ifp == NULL) ? 0 : ifp->if_idle_flags);
+#else
+#pragma unused(ifp)
+ return (0);
+#endif /* IFNET_ROUTE_REFCNT */
+}
+
static const ifnet_offload_t offload_mask = IFNET_CSUM_IP | IFNET_CSUM_TCP |
IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT | IFNET_IP_FRAGMENT |
IFNET_CSUM_SUM16 | IFNET_VLAN_TAGGING | IFNET_VLAN_MTU |
if (ifmaddr == NULL || ifmaddr->ifma_ifp == NULL) return NULL;
return ifmaddr->ifma_ifp;
}
+
+/******************************************************************************/
+/* interface cloner */
+/******************************************************************************/
+
+errno_t
+ifnet_clone_attach(struct ifnet_clone_params *cloner_params, if_clone_t *ifcloner)
+{
+ errno_t error = 0;
+ struct if_clone *ifc = NULL;
+ size_t namelen;
+
+ if (cloner_params == NULL || ifcloner == NULL || cloner_params->ifc_name == NULL ||
+ cloner_params->ifc_create == NULL || cloner_params->ifc_destroy == NULL ||
+ (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
+ printf("ifnet_clone_attach: already a cloner for %s\n", cloner_params->ifc_name);
+ error = EEXIST;
+ goto fail;
+ }
+
+ /* Make room for name string */
+ ifc = _MALLOC(sizeof(struct if_clone) + IFNAMSIZ + 1, M_CLONE, M_WAITOK | M_ZERO);
+ if (ifc == NULL) {
+ printf("ifnet_clone_attach: _MALLOC failed\n");
+ error = ENOBUFS;
+ goto fail;
+ }
+ strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
+ ifc->ifc_name = (char *)(ifc + 1);
+ ifc->ifc_namelen = namelen;
+ ifc->ifc_maxunit = IF_MAXUNIT;
+ ifc->ifc_create = cloner_params->ifc_create;
+ ifc->ifc_destroy = cloner_params->ifc_destroy;
+
+ error = if_clone_attach(ifc);
+ if (error != 0) {
+ printf("ifnet_clone_attach: if_clone_attach failed %d\n", error);
+ goto fail;
+ }
+ *ifcloner = ifc;
+
+ return 0;
+fail:
+ if (ifc != NULL)
+ FREE(ifc, M_CLONE);
+ return error;
+}
+
+errno_t
+ifnet_clone_detach(if_clone_t ifcloner)
+{
+ errno_t error = 0;
+ struct if_clone *ifc = ifcloner;
+
+ if (ifc == NULL || ifc->ifc_name == NULL)
+ return EINVAL;
+
+ if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
+ printf("ifnet_clone_attach: no cloner for %s\n", ifc->ifc_name);
+ error = EINVAL;
+ goto fail;
+ }
+
+ if_clone_detach(ifc);
+
+ FREE(ifc, M_CLONE);
+
+ return 0;
+fail:
+ return error;
+}
+
+
+
/*
- * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
@result Extended flags. These flags are defined in net/if.h
*/
extern u_int32_t ifnet_eflags(ifnet_t interface);
+
+/*!
+ @function ifnet_set_idle_flags
+ @discussion Sets the if_idle_flags to new_flags. This function
+ lets you specify which flags you want to change using the
+ mask. The kernel will effectively take the lock, then set
+ the interface's idle flags to:
+ (if_idle_flags & ~mask) | (new_flags & mask).
+ Setting the flags to any non-zero value will cause the
+ networking stack to aggressively purge expired objects,
+ such as route entries, etc.
+ @param interface The interface.
+ @param new_flags The new set of flags that should be set. These
+ flags are defined in net/if.h
+ @param mask The mask of flags to be modified.
+ @result 0 on success otherwise the errno error. ENOTSUP is returned
+ when this call is made on non-supporting platforms.
+*/
+extern errno_t ifnet_set_idle_flags(ifnet_t interface, u_int32_t new_flags,
+ u_int32_t mask);
+
+/*!
+ @function ifnet_idle_flags
+ @discussion Returns the value of if_idle_flags.
+ @param interface Interface to retrieve the flags from.
+ @result if_idle_flags. These flags are defined in net/if.h
+*/
+extern u_int32_t ifnet_idle_flags(ifnet_t interface);
+
#endif /* KERNEL_PRIVATE */
/*!
*/
extern ifnet_t ifmaddr_ifnet(ifmultiaddr_t ifmaddr);
+#ifdef KERNEL_PRIVATE
+/******************************************************************************/
+/* interface cloner */
+/******************************************************************************/
+
+/*
+ @typedef ifnet_clone_create_func
+ @discussion ifnet_clone_create_func is called to create an interface.
+ @param ifcloner The interface cloner.
+ @param unit The interface unit number to create.
+ @param params Additional information specific to the interface cloner.
+ @result Return zero on success or an errno error value on failure.
+ */
+typedef errno_t (*ifnet_clone_create_func)(if_clone_t ifcloner, u_int32_t unit, void *params);
+
+/*
+ @typedef ifnet_clone_destroy_func
+	@discussion ifnet_clone_destroy_func is called to destroy an interface created
+		by an interface cloner.
+ @param interface The interface to destroy.
+ @result Return zero on success or an errno error value on failure.
+ */
+typedef errno_t (*ifnet_clone_destroy_func)(ifnet_t interface);
+
+/*
+ @struct ifnet_clone_params
+ @discussion This structure is used to represent an interface cloner.
+ @field ifc_name The interface name handled by this interface cloner.
+ @field ifc_create The function to create an interface.
+ @field ifc_destroy The function to destroy an interface.
+*/
+struct ifnet_clone_params {
+ const char *ifc_name;
+ ifnet_clone_create_func ifc_create;
+ ifnet_clone_destroy_func ifc_destroy;
+};
+
+/*
+ @function ifnet_clone_attach
+ @discussion Attaches a new interface cloner.
+ @param cloner_params The structure that defines an interface cloner.
+	@param ifcloner A pointer to an opaque handle that represents the interface
+		cloner that is attached upon success.
+ @result Returns 0 on success.
+ May return ENOBUFS if there is insufficient memory.
+ May return EEXIST if an interface cloner with the same name is already attached.
+ */
+extern errno_t ifnet_clone_attach(struct ifnet_clone_params *cloner_params, if_clone_t *ifcloner);
+
+/*
+ @function ifnet_clone_detach
+ @discussion Detaches a previously attached interface cloner.
+ @param ifcloner The opaque handle returned when the interface cloner was attached.
+ @result Returns 0 on success.
+ */
+extern errno_t ifnet_clone_detach(if_clone_t ifcloner);
+
+#endif /* KERNEL_PRIVATE */
+
__END_DECLS
#endif /* __KPI_INTERFACE__ */
+
/*
- * Copyright (c) 2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/* $apfw: git commit 7c8016ea91f7b68950cf41729c92dd8e3e423ba7 $ */
+/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
/*
static int pf_test_state_tcp(struct pf_state **, int,
struct pfi_kif *, struct mbuf *, int,
void *, struct pf_pdesc *, u_short *);
+#ifndef NO_APPLE_EXTENSIONS
static int pf_test_state_udp(struct pf_state **, int,
struct pfi_kif *, struct mbuf *, int,
void *, struct pf_pdesc *, u_short *);
+#else
+static int pf_test_state_udp(struct pf_state **, int,
+ struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *);
+#endif
static int pf_test_state_icmp(struct pf_state **, int,
struct pfi_kif *, struct mbuf *, int,
void *, struct pf_pdesc *, u_short *);
static void pf_pptp_handler(struct pf_state *, int, int,
struct pf_pdesc *, struct pfi_kif *);
static void pf_pptp_unlink(struct pf_state *);
+static void pf_grev1_unlink(struct pf_state *);
static int pf_test_state_grev1(struct pf_state **, int,
struct pfi_kif *, int, struct pf_pdesc *);
static int pf_ike_compare(struct pf_app_state *,
}
pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
- &p, pf_time_second());
+ &p, pf_calendar_time_second());
/* kill existing states if that's required. */
if ((*state)->rule.ptr->flush) {
struct pf_addr_wrap *xdst = NULL;
#ifndef NO_APPLE_EXTENSIONS
struct pf_addr_wrap *xsrc = NULL;
+ union pf_rule_xport rdrxport;
#endif
if (r->action == PF_BINAT && direction == PF_IN) {
} else if (r->action == PF_RDR && direction == PF_OUT) {
dst = &r->src;
src = &r->dst;
- if (r->rpool.cur != NULL)
+ if (r->rpool.cur != NULL) {
+ rdrxport.range.op = PF_OP_EQ;
+ rdrxport.range.port[0] =
+ htons(r->rpool.proxy_port[0]);
xsrc = &r->rpool.cur->addr;
+ }
#endif
} else {
src = &r->src;
r = TAILQ_NEXT(r, entries);
else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
src->neg, kif))
- r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
- PF_SKIP_DST_ADDR].ptr;
- else if (!pf_match_xport(r->proto,
+ r = TAILQ_NEXT(r, entries);
+ else if (xsrc && (!rdrxport.range.port[0] ||
+ !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
+ sxport)))
+ r = TAILQ_NEXT(r, entries);
+ else if (!xsrc && !pf_match_xport(r->proto,
r->proto_variant, &src->xport, sxport))
#else
else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
&r->dst.addr.v.a.mask,
daddr, pd->af);
}
- if (nxport && dxport)
- *nxport = *sxport;
+ if (nxport && r->dst.xport.range.port[0])
+ nxport->port =
+ r->dst.xport.range.port[0];
break;
case PF_IN:
if (pf_map_addr(pd->af, r, saddr,
#endif
s->rule.ptr = r;
s->nat_rule.ptr = nr;
- if (nr && nr->action == PF_RDR && direction == PF_OUT)
- s->anchor.ptr = a;
+ s->anchor.ptr = a;
STATE_INC_COUNTERS(s);
s->allow_opts = r->allow_opts;
s->log = r->log & PF_LOG_ALL;
{
#pragma unused(direction)
struct tcphdr *th;
- struct pf_pptp_state *as;
+ struct pf_pptp_state *pptps;
struct pf_pptp_ctrl_msg cm;
size_t plen;
struct pf_state *gs;
struct mbuf *m;
struct pf_state_key *sk;
struct pf_state_key *gsk;
+ struct pf_app_state *gas;
+
+ sk = s->state_key;
+ pptps = &sk->app_state->u.pptp;
+ gs = pptps->grev1_state;
+
+ if (gs)
+ gs->expire = pf_time_second();
m = pd->mp;
plen = min(sizeof (cm), m->m_pkthdr.len - off);
if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
return;
- as = &s->state_key->app_state->u.pptp;
m_copydata(m, off, plen, &cm);
if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
if (ntohs(cm.hdr.type) != 1)
return;
- sk = s->state_key;
- gs = as->grev1_state;
if (!gs) {
gs = pool_get(&pf_state_pl, PR_WAITOK);
if (!gs)
gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
gs->src.scrub = gs->dst.scrub = 0;
+ gas = pool_get(&pf_app_state_pl, PR_NOWAIT);
+ if (!gas) {
+ pool_put(&pf_state_pl, gs);
+ return;
+ }
+
gsk = pf_alloc_state_key(gs);
if (!gsk) {
+ pool_put(&pf_app_state_pl, gas);
pool_put(&pf_state_pl, gs);
return;
}
gsk->af = sk->af;
gsk->proto = IPPROTO_GRE;
gsk->proto_variant = PF_GRE_PPTP_VARIANT;
- gsk->app_state = 0;
+ gsk->app_state = gas;
gsk->lan.xport.call_id = 0;
gsk->gwy.xport.call_id = 0;
gsk->ext.xport.call_id = 0;
-
+ memset(gas, 0, sizeof (*gas));
+ gas->u.grev1.pptp_state = s;
STATE_INC_COUNTERS(gs);
- as->grev1_state = gs;
+ pptps->grev1_state = gs;
+ (void) hook_establish(&gs->unlink_hooks, 0,
+ (hook_fn_t) pf_grev1_unlink, gs);
} else {
gsk = gs->state_key;
}
m = pf_lazy_makewritable(pd, m, off + plen);
if (!m) {
- as->grev1_state = NULL;
+ pptps->grev1_state = NULL;
STATE_DEC_COUNTERS(gs);
pool_put(&pf_state_pl, gs);
return;
case PF_PPTP_INSERT_GRE:
gs->creation = pf_time_second();
gs->expire = pf_time_second();
- gs->timeout = PFTM_GREv1_FIRST_PACKET;
+ gs->timeout = PFTM_TCP_ESTABLISHED;
if (gs->src_node != NULL) {
++gs->src_node->states;
VERIFY(gs->src_node->states != 0);
* succeed. Failures are expected to be rare enough
* that fixing this is a low priority.
*/
- as->grev1_state = NULL;
- pd->lmw = -1;
+ pptps->grev1_state = NULL;
+ pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */
pf_src_tree_remove_state(gs);
STATE_DEC_COUNTERS(gs);
pool_put(&pf_state_pl, gs);
pf_pptp_unlink(struct pf_state *s)
{
struct pf_app_state *as = s->state_key->app_state;
- struct pf_state *gs = as->u.pptp.grev1_state;
+ struct pf_state *grev1s = as->u.pptp.grev1_state;
+
+ if (grev1s) {
+ struct pf_app_state *gas = grev1s->state_key->app_state;
- if (gs) {
- if (gs->timeout < PFTM_MAX)
- gs->timeout = PFTM_PURGE;
- as->u.pptp.grev1_state = 0;
+ if (grev1s->timeout < PFTM_MAX)
+ grev1s->timeout = PFTM_PURGE;
+ gas->u.grev1.pptp_state = NULL;
+ as->u.pptp.grev1_state = NULL;
+ }
+}
+
+/*
+ * Unlink hook for a GREv1 state: a GREv1 state and its parent PPTP
+ * control state point at each other through their app_state; when the
+ * GREv1 state is torn down, clear both cross-links so neither side is
+ * left holding a dangling pointer.
+ */
+static void
+pf_grev1_unlink(struct pf_state *s)
+{
+	struct pf_app_state *as = s->state_key->app_state;
+	struct pf_state *pptps = as->u.grev1.pptp_state;
+
+	if (pptps) {
+		struct pf_app_state *pas = pptps->state_key->app_state;
+
+		/* break both directions of the PPTP <-> GREv1 linkage */
+		pas->u.pptp.grev1_state = NULL;
+		as->u.grev1.pptp_state = NULL;
+	}
}
* zeroing the window when it's
* truncated down to 16-bits. --jhw
*/
- u_int32_t _win = dst->max_win;
- _win <<= dst->wscale & PF_WSCALE_MASK;
- dst->max_win = MIN(0xffff, _win);
+ u_int32_t max_win = dst->max_win;
+ max_win <<=
+ dst->wscale & PF_WSCALE_MASK;
+ dst->max_win = MIN(0xffff, max_win);
#else
/* fixup other window */
dst->max_win <<= dst->wscale &
return (PF_PASS);
}
+#ifndef NO_APPLE_EXTENSIONS
static int
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
+#else
+pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
+ struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
+#endif
{
#pragma unused(h)
struct pf_state_peer *src, *dst;
}
m = pd->mp;
}
-#endif
/* translate source/destination address, if necessary */
-#ifndef NO_APPLE_EXTENSIONS
if (STATE_TRANSLATE((*state)->state_key)) {
m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));
- if (!m)
+ if (!m) {
+ REASON_SET(reason, PFRES_MEMORY);
return (PF_DROP);
+ }
if (direction == PF_OUT)
pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
m_copyback(m, off, sizeof (*uh), uh);
}
#else
+ /* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT)
pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
struct mbuf *m;
-#ifndef NO_APPLE_EXTENSIONS
key.app_state = 0;
-#endif
key.af = pd->af;
key.proto = IPPROTO_GRE;
key.proto_variant = PF_GRE_PPTP_VARIANT;
(*state)->expire = pf_time_second();
if (src->state >= PFGRE1S_INITIATING &&
dst->state >= PFGRE1S_INITIATING) {
- (*state)->timeout = PFTM_GREv1_ESTABLISHED;
+ if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
+ (*state)->timeout = PFTM_GREv1_ESTABLISHED;
src->state = PFGRE1S_ESTABLISHED;
dst->state = PFGRE1S_ESTABLISHED;
} else {
(*state)->timeout = PFTM_GREv1_INITIATING;
}
+
+ if ((*state)->state_key->app_state)
+ (*state)->state_key->app_state->u.grev1.pptp_state->expire =
+ pf_time_second();
+
/* translate source/destination address, if necessary */
if (STATE_GRE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT) {
REASON_SET(&reason, PFRES_SHORT);
goto done;
}
+#ifndef NO_APPLE_EXTENSIONS
action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
&reason);
-#ifndef NO_APPLE_EXTENSIONS
if (pd.lmw < 0)
goto done;
PF_APPLE_UPDATE_PDESC_IPv4();
+#else
+ action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif
if (action == PF_PASS) {
#if NPFSYNC
REASON_SET(&reason, PFRES_SHORT);
goto done;
}
+#ifndef NO_APPLE_EXTENSIONS
action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
&reason);
-#ifndef NO_APPLE_EXTENSIONS
if (pd.lmw < 0)
goto done;
PF_APPLE_UPDATE_PDESC_IPv6();
+#else
+ action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif
if (action == PF_PASS) {
#if NPFSYNC
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
return (NULL);
strlcpy(kif->pfik_name, kif_name, sizeof (kif->pfik_name));
- kif->pfik_tzero = pf_time_second();
+ kif->pfik_tzero = pf_calendar_time_second();
TAILQ_INIT(&kif->pfik_dynaddrs);
RB_INSERT(pfi_ifhead, &pfi_ifs, kif);
if (kif->pfik_ifp != NULL)
pfi_instance_add(kif->pfik_ifp, net, flags);
- if ((e = pfr_set_addrs(&kt->pfrkt_t, pfi_buffer, pfi_buffer_cnt, &size2,
- NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
+ if ((e = pfr_set_addrs(&kt->pfrkt_t, CAST_USER_ADDR_T(pfi_buffer),
+ pfi_buffer_cnt, &size2, NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
printf("pfi_table_update: cannot set %d new addresses "
"into table %s: %d\n", pfi_buffer_cnt, kt->pfrkt_name, e);
}
if (pfs == NULL) {
bzero(p->pfik_packets, sizeof (p->pfik_packets));
bzero(p->pfik_bytes, sizeof (p->pfik_bytes));
- p->pfik_tzero = pf_time_second();
+ p->pfik_tzero = pf_calendar_time_second();
}
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
}
int
-pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
+pfi_get_ifaces(const char *name, user_addr_t buf, int *size)
{
- struct pfi_kif *p, *nextp;
+ struct pfi_kif *p, *nextp;
int n = 0;
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
if (pfi_skip_if(name, p))
continue;
if (*size > n++) {
+ struct pfi_uif u;
+
if (!p->pfik_tzero)
- p->pfik_tzero = pf_time_second();
+ p->pfik_tzero = pf_calendar_time_second();
pfi_kif_ref(p, PFI_KIF_REF_RULE);
- buf++;
- if (copyout(p, CAST_USER_ADDR_T(buf), sizeof (*buf))) {
+
+ /* return the user space version of pfi_kif */
+ bzero(&u, sizeof (u));
+ bcopy(p->pfik_name, &u.pfik_name, sizeof (u.pfik_name));
+ bcopy(p->pfik_packets, &u.pfik_packets,
+ sizeof (u.pfik_packets));
+ bcopy(p->pfik_bytes, &u.pfik_bytes,
+ sizeof (u.pfik_bytes));
+ u.pfik_tzero = p->pfik_tzero;
+ u.pfik_flags = p->pfik_flags;
+ u.pfik_states = p->pfik_states;
+ u.pfik_rules = p->pfik_rules;
+
+ if (copyout(&u, buf, sizeof (u))) {
pfi_kif_unref(p, PFI_KIF_REF_RULE);
return (EFAULT);
}
+ buf += sizeof (u);
nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p);
pfi_kif_unref(p, PFI_KIF_REF_RULE);
}
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/* $apfw: pf_ioctl.c,v 1.16 2008/08/27 00:01:32 jhw Exp $ */
+/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
/*
};
static void pf_attach_hooks(void);
+#if 0
+/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
-static int pf_hooks_attached = 0;
+#endif
+
+/*
+ * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
+ * and used in pf_af_hook() for performance optimization, such that packets
+ * will enter pf_test() or pf_test6() only when PF is running.
+ */
+static int pf_is_enabled;
struct pf_rule pf_default_rule;
#if ALTQ
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
+#define PF_USER_ADDR(a, s, f) \
+ (proc_is64bit(current_proc()) ? \
+ ((struct s##_64 *)a)->f : ((struct s##_32 *)a)->f)
+
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;
}
(void) devfs_make_node(makedev(maj, 0), DEVFS_CHAR,
UID_ROOT, GID_WHEEL, 0600, "pf", 0);
+
+ pf_attach_hooks();
}
#if 0
int i;
char r = '\0';
+ pf_detach_hooks();
+
pf_status.running = 0;
wakeup(pf_purge_thread_fn);
} else if (pf_purge_thread == NULL) {
error = ENOMEM;
} else {
+ pf_is_enabled = 1;
pf_status.running = 1;
pf_status.since = pf_calendar_time_second();
if (pf_status.stateid == 0) {
pf_status.stateid = pf_status.stateid << 32;
}
mbuf_growth_aggressive();
- pf_attach_hooks();
wakeup(pf_purge_thread_fn);
DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
error = ENOENT;
} else {
mbuf_growth_normal();
- pf_detach_hooks();
pf_status.running = 0;
+ pf_is_enabled = 0;
pf_status.since = pf_calendar_time_second();
wakeup(pf_purge_thread_fn);
DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
case DIOCGETSTATES: {
struct pfioc_states *ps = (struct pfioc_states *)addr;
struct pf_state *state;
- struct pfsync_state *y, *pstore;
+ struct pfsync_state *pstore;
+ user_addr_t buf;
u_int32_t nr = 0;
if (ps->ps_len == 0) {
}
pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
-
- y = ps->ps_states;
+ buf = PF_USER_ADDR(addr, pfioc_states, ps_buf);
state = TAILQ_FIRST(&state_list);
while (state) {
if (state->timeout != PFTM_UNLINKED) {
- if ((nr+1) * sizeof (*y) > (unsigned)ps->ps_len)
+ if ((nr + 1) * sizeof (*pstore) >
+ (unsigned)ps->ps_len)
break;
pf_state_export(pstore,
state->state_key, state);
- error = copyout(pstore, CAST_USER_ADDR_T(y),
- sizeof (*y));
+ error = copyout(pstore, buf, sizeof (*pstore));
if (error) {
_FREE(pstore, M_TEMP);
goto fail;
}
- y++;
+ buf += sizeof (*pstore);
nr++;
}
state = TAILQ_NEXT(state, entry_list);
case DIOCRADDTABLES: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_table)) {
error = ENODEV;
break;
}
- error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
+ error = pfr_add_tables(buf, io->pfrio_size,
&io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRDELTABLES: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_table)) {
error = ENODEV;
break;
}
- error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
+ error = pfr_del_tables(buf, io->pfrio_size,
&io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRGETTABLES: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_table)) {
error = ENODEV;
break;
}
- error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_get_tables(&io->pfrio_table, buf,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRGETTSTATS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_tstats)) {
error = ENODEV;
break;
}
- error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_get_tstats(&io->pfrio_table, buf,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRCLRTSTATS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_table)) {
error = ENODEV;
break;
}
- error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
+ error = pfr_clr_tstats(buf, io->pfrio_size,
&io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRSETTFLAGS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_table)) {
error = ENODEV;
break;
}
- error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
+ error = pfr_set_tflags(buf, io->pfrio_size,
io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
&io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
case DIOCRADDADDRS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_add_addrs(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
PFR_FLAG_USERIOCTL);
break;
case DIOCRDELADDRS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_del_addrs(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
PFR_FLAG_USERIOCTL);
break;
case DIOCRSETADDRS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_set_addrs(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
&io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
PFR_FLAG_USERIOCTL, 0);
case DIOCRGETADDRS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_get_addrs(&io->pfrio_table, buf,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRGETASTATS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_astats)) {
error = ENODEV;
break;
}
- error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_get_astats(&io->pfrio_table, buf,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
}
case DIOCRCLRASTATS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_clr_astats(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
PFR_FLAG_USERIOCTL);
break;
case DIOCRTSTADDRS: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_tst_addrs(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
PFR_FLAG_USERIOCTL);
break;
case DIOCRINADEFINE: {
struct pfioc_table *io = (struct pfioc_table *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_table, pfrio_buffer);
if (io->pfrio_esize != sizeof (struct pfr_addr)) {
error = ENODEV;
break;
}
- error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
+ error = pfr_ina_define(&io->pfrio_table, buf,
io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
break;
struct pfioc_trans *io = (struct pfioc_trans *)addr;
struct pfioc_trans_e *ioe;
struct pfr_table *table;
+ user_addr_t buf;
int i;
if (io->esize != sizeof (*ioe)) {
}
ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
- for (i = 0; i < io->size; i++) {
- if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
- sizeof (*ioe))) {
+ buf = PF_USER_ADDR(addr, pfioc_trans, array);
+ for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
+ if (copyin(buf, ioe, sizeof (*ioe))) {
_FREE(table, M_TEMP);
_FREE(ioe, M_TEMP);
error = EFAULT;
}
break;
}
- if (copyout(ioe, CAST_USER_ADDR_T(io->array+i),
- sizeof (io->array[i]))) {
+ if (copyout(ioe, buf, sizeof (*ioe))) {
_FREE(table, M_TEMP);
_FREE(ioe, M_TEMP);
error = EFAULT;
struct pfioc_trans *io = (struct pfioc_trans *)addr;
struct pfioc_trans_e *ioe;
struct pfr_table *table;
+ user_addr_t buf;
int i;
if (io->esize != sizeof (*ioe)) {
}
ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
- for (i = 0; i < io->size; i++) {
- if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
- sizeof (*ioe))) {
+ buf = PF_USER_ADDR(addr, pfioc_trans, array);
+ for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
+ if (copyin(buf, ioe, sizeof (*ioe))) {
_FREE(table, M_TEMP);
_FREE(ioe, M_TEMP);
error = EFAULT;
struct pfioc_trans_e *ioe;
struct pfr_table *table;
struct pf_ruleset *rs;
+ user_addr_t _buf, buf;
int i;
if (io->esize != sizeof (*ioe)) {
}
ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK);
table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK);
+ buf = _buf = PF_USER_ADDR(addr, pfioc_trans, array);
/* first makes sure everything will succeed */
- for (i = 0; i < io->size; i++) {
- if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
- sizeof (*ioe))) {
+ for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
+ if (copyin(buf, ioe, sizeof (*ioe))) {
_FREE(table, M_TEMP);
_FREE(ioe, M_TEMP);
error = EFAULT;
break;
}
}
+ buf = _buf;
/* now do the commit - no errors should happen here */
- for (i = 0; i < io->size; i++) {
- if (copyin(CAST_USER_ADDR_T(io->array+i), ioe,
- sizeof (*ioe))) {
+ for (i = 0; i < io->size; i++, buf += sizeof (*ioe)) {
+ if (copyin(buf, ioe, sizeof (*ioe))) {
_FREE(table, M_TEMP);
_FREE(ioe, M_TEMP);
error = EFAULT;
case DIOCGETSRCNODES: {
struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
- struct pf_src_node *n, *sn, *pstore;
+ struct pf_src_node *n, *pstore;
+ user_addr_t buf;
u_int32_t nr = 0;
int space = psn->psn_len;
}
pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK);
+ buf = PF_USER_ADDR(addr, pfioc_src_nodes, psn_buf);
- sn = psn->psn_src_nodes;
RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
uint64_t secs = pf_time_second(), diff;
- if ((nr + 1) * sizeof (*sn) > (unsigned)psn->psn_len)
+ if ((nr + 1) * sizeof (*pstore) >
+ (unsigned)psn->psn_len)
break;
bcopy(n, pstore, sizeof (*pstore));
n->conn_rate.count * diff /
n->conn_rate.seconds;
- error = copyout(pstore, CAST_USER_ADDR_T(sn),
- sizeof (*sn));
+ error = copyout(pstore, buf, sizeof (*pstore));
if (error) {
_FREE(pstore, M_TEMP);
goto fail;
}
- sn++;
+ buf += sizeof (*pstore);
nr++;
}
psn->psn_len = sizeof (struct pf_src_node) * nr;
case DIOCIGETIFACES: {
struct pfioc_iface *io = (struct pfioc_iface *)addr;
+ user_addr_t buf = PF_USER_ADDR(addr, pfioc_iface, pfiio_buffer);
- if (io->pfiio_esize != sizeof (struct pfi_kif)) {
+ /* esize must be that of the user space version of pfi_kif */
+ if (io->pfiio_esize != sizeof (struct pfi_uif)) {
error = ENODEV;
break;
}
- error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
- &io->pfiio_size);
+ io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
+ error = pfi_get_ifaces(io->pfiio_name, buf, &io->pfiio_size);
break;
}
case DIOCSETIFFLAG: {
struct pfioc_iface *io = (struct pfioc_iface *)addr;
+ io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
break;
}
case DIOCCLRIFFLAG: {
struct pfioc_iface *io = (struct pfioc_iface *)addr;
+ io->pfiio_name[sizeof (io->pfiio_name) - 1] = '\0';
error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
break;
}
reentry = (ifp->if_pf_curthread == curthread);
if (!reentry) {
lck_rw_lock_shared(pf_perim_lock);
- if (!pf_hooks_attached)
+ if (!pf_is_enabled)
goto done;
lck_mtx_lock(pf_lock);
pf_ifaddr_hook(struct ifnet *ifp, unsigned long cmd)
{
lck_rw_lock_shared(pf_perim_lock);
- if (!pf_hooks_attached)
- goto done;
-
lck_mtx_lock(pf_lock);
switch (cmd) {
}
lck_mtx_unlock(pf_lock);
-done:
lck_rw_done(pf_perim_lock);
return (0);
}
pf_ifnet_hook(struct ifnet *ifp, int attach)
{
lck_rw_lock_shared(pf_perim_lock);
- if (!pf_hooks_attached)
- goto done;
-
lck_mtx_lock(pf_lock);
if (attach)
pfi_attach_ifnet(ifp);
else
pfi_detach_ifnet(ifp);
lck_mtx_unlock(pf_lock);
-done:
lck_rw_done(pf_perim_lock);
}
static void
pf_attach_hooks(void)
{
- int i;
-
- if (pf_hooks_attached)
- return;
-
ifnet_head_lock_shared();
- for (i = 0; i <= if_index; i++) {
- struct ifnet *ifp = ifindex2ifnet[i];
- if (ifp != NULL) {
- pfi_attach_ifnet(ifp);
+ /*
+ * Check against ifnet_addrs[] before proceeding, in case this
+ * is called very early on, e.g. during dlil_init() before any
+ * network interface is attached.
+ */
+ if (ifnet_addrs != NULL) {
+ int i;
+
+ for (i = 0; i <= if_index; i++) {
+ struct ifnet *ifp = ifindex2ifnet[i];
+ if (ifp != NULL) {
+ pfi_attach_ifnet(ifp);
+ }
}
}
ifnet_head_done();
- pf_hooks_attached = 1;
}
+#if 0
+/* currently unused along with pfdetach() */
static void
pf_detach_hooks(void)
{
-	int i;
-
-	if (!pf_hooks_attached)
-		return;
-
	ifnet_head_lock_shared();
-	for (i = 0; i <= if_index; i++) {
-		struct ifnet *ifp = ifindex2ifnet[i];
-		if (ifp != NULL && ifp->if_pf_kif != NULL) {
-			pfi_detach_ifnet(ifp);
+	/*
+	 * Check against ifnet_addrs[] before proceeding, in case this
+	 * is called very early on, e.g. before any network interface
+	 * is attached (mirrors pf_attach_hooks above).
+	 */
+	if (ifnet_addrs != NULL) {
+		int i;
+
+		for (i = 0; i <= if_index; i++) {
+			struct ifnet *ifp = ifindex2ifnet[i];
+			if (ifp != NULL && ifp->if_pf_kif != NULL) {
+				pfi_detach_ifnet(ifp);
+			}
		}
	}
	ifnet_head_done();
-	pf_hooks_attached = 0;
}
+#endif
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
if (find.fp_mss == 0)
find.fp_mss = 128;
if (f->fp_flags & PF_OSFP_WSIZE_MSS)
- find.fp_wsize *= find.fp_mss, 1;
+ find.fp_wsize *= find.fp_mss;
else if (f->fp_flags & PF_OSFP_WSIZE_MTU)
find.fp_wsize *= (find.fp_mss + 40);
else if (f->fp_flags & PF_OSFP_WSIZE_MOD)
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define COPYIN(from, to, size, flags) \
((flags & PFR_FLAG_USERIOCTL) ? \
- copyin(CAST_USER_ADDR_T(from), (to), (size)) : \
- (bcopy((from), (to), (size)), 0))
+ copyin((from), (to), (size)) : \
+ (bcopy((void *)(uintptr_t)(from), (to), (size)), 0))
#define COPYOUT(from, to, size, flags) \
((flags & PFR_FLAG_USERIOCTL) ? \
- copyout((from), CAST_USER_ADDR_T(to), (size)) : \
- (bcopy((from), (to), (size)), 0))
+ copyout((from), (to), (size)) : \
+ (bcopy((from), (void *)(uintptr_t)(to), (size)), 0))
#define FILLIN_SIN(sin, addr) \
do { \
PFRW_DYNADDR_UPDATE
} pfrw_op;
union {
- struct pfr_addr *pfrw1_addr;
- struct pfr_astats *pfrw1_astats;
+ user_addr_t pfrw1_addr;
+ user_addr_t pfrw1_astats;
struct pfr_kentryworkq *pfrw1_workq;
struct pfr_kentry *pfrw1_kentry;
struct pfi_dynaddr *pfrw1_dyn;
struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
-static void pfr_reset_feedback(struct pfr_addr *, int, int);
+static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
}
int
-pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *nadd, int flags)
{
struct pfr_ktable *kt, *tmpkt;
struct pfr_kentry *p, *q;
struct pfr_addr ad;
int i, rv, xadd = 0;
- u_int64_t tzero = pf_time_second();
+ user_addr_t addr = _addr;
+ u_int64_t tzero = pf_calendar_time_second();
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_FEEDBACK);
if (tmpkt == NULL)
return (ENOMEM);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
}
}
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
pfr_clean_node_mask(tmpkt, &workq);
pfr_clean_node_mask(tmpkt, &workq);
pfr_destroy_kentries(&workq);
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
pfr_destroy_ktable(tmpkt, 0);
return (rv);
}
int
-pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *ndel, int flags)
{
struct pfr_ktable *kt;
struct pfr_kentryworkq workq;
struct pfr_kentry *p;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xdel = 0, log = 1;
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
pfr_mark_addrs(kt);
} else {
/* iterate over addresses to delete */
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
return (EFAULT);
if (pfr_validate_addr(&ad))
return (EINVAL);
}
}
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
xdel++;
}
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
if (!(flags & PFR_FLAG_DUMMY)) {
return (0);
_bad:
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
return (rv);
}
int
-pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
int *size2, int *nadd, int *ndel, int *nchange, int flags,
u_int32_t ignore_pfrt_flags)
{
struct pfr_kentryworkq addq, delq, changeq;
struct pfr_kentry *p, *q;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xadd = 0, xdel = 0, xchange = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_FEEDBACK);
SLIST_INIT(&addq);
SLIST_INIT(&delq);
SLIST_INIT(&changeq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
}
_skip:
if (flags & PFR_FLAG_FEEDBACK)
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
senderr(0);
}
i = 0;
+ addr = _addr + size;
SLIST_FOREACH(p, &delq, pfrke_workq) {
pfr_copyout_addr(&ad, p);
ad.pfra_fback = PFR_FB_DELETED;
- if (COPYOUT(&ad, addr+size+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
+ addr += sizeof (ad);
i++;
}
}
pfr_clean_node_mask(tmpkt, &addq);
pfr_destroy_kentries(&addq);
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
pfr_destroy_ktable(tmpkt, 0);
return (rv);
}
int
-pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
int *nmatch, int flags)
{
struct pfr_ktable *kt;
if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
return (ESRCH);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
return (EFAULT);
if (pfr_validate_addr(&ad))
return (EINVAL);
(p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
if (p != NULL && !p->pfrke_not)
xmatch++;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
return (EFAULT);
}
if (nmatch != NULL)
}
int
-pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
+pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
int flags)
{
struct pfr_ktable *kt;
}
int
-pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
+pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
int flags)
{
struct pfr_ktable *kt;
struct pfr_walktree w;
struct pfr_kentryworkq workq;
int rv;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
/* XXX PFR_FLAG_CLSTATS disabled */
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
}
int
-pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
int *nzero, int flags)
{
struct pfr_ktable *kt;
struct pfr_kentryworkq workq;
struct pfr_kentry *p;
struct pfr_addr ad;
+ user_addr_t addr = _addr;
int i, rv, xzero = 0;
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
return (ESRCH);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
if (flags & PFR_FLAG_FEEDBACK) {
ad.pfra_fback = (p != NULL) ?
PFR_FB_CLEARED : PFR_FB_NONE;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
senderr(EFAULT);
}
if (p != NULL) {
return (0);
_bad:
if (flags & PFR_FLAG_FEEDBACK)
- pfr_reset_feedback(addr, size, flags);
+ pfr_reset_feedback(_addr, size, flags);
return (rv);
}
-int
+static int
pfr_validate_addr(struct pfr_addr *ad)
{
int i;
return (0);
}
-void
+static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
int *naddr, int sweep)
{
*naddr = w.pfrw_cnt;
}
-void
+static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
struct pfr_walktree w;
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
union sockaddr_union sa, mask;
return (ke);
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
struct pfr_kentry *ke;
return (ke);
}
-void
+static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
struct pfr_kentry *p, *q;
}
}
-void
+static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
if (ke->pfrke_intrpool)
pool_put(&pfr_kentry_pl, ke);
}
-void
+static void
pfr_insert_kentries(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq, u_int64_t tzero)
{
return (0);
}
-void
+static void
pfr_remove_kentries(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq)
{
pfr_destroy_kentries(workq);
}
-void
+static void
pfr_clean_node_mask(struct pfr_ktable *kt,
struct pfr_kentryworkq *workq)
{
pfr_unroute_kentry(kt, p);
}
-void
+static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
int negchange)
{
}
}
-void
-pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
+static void
+pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
struct pfr_addr ad;
int i;
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
break;
ad.pfra_fback = PFR_FB_NONE;
- if (COPYOUT(&ad, addr+i, sizeof (ad), flags))
+ if (COPYOUT(&ad, addr, sizeof (ad), flags))
break;
}
}
-void
+static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
int i;
}
}
-int
+static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
union sockaddr_union mask;
return (rn == NULL ? -1 : 0);
}
-int
+static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
union sockaddr_union mask;
return (0);
}
-void
+static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
bzero(ad, sizeof (*ad));
ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}
-int
+static int
pfr_walktree(struct radix_node *rn, void *arg)
{
struct pfr_kentry *ke = (struct pfr_kentry *)rn;
struct pfr_addr ad;
pfr_copyout_addr(&ad, ke);
- if (copyout(&ad,
- CAST_USER_ADDR_T(w->pfrw_addr),
- sizeof (ad)))
+ if (copyout(&ad, w->pfrw_addr, sizeof (ad)))
return (EFAULT);
- w->pfrw_addr++;
+ w->pfrw_addr += sizeof (ad);
}
break;
case PFRW_GET_ASTATS:
if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
return (EFAULT);
- w->pfrw_astats++;
+ w->pfrw_astats += sizeof (as);
}
break;
case PFRW_POOL_GET:
}
int
-pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
+pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
struct pfr_ktableworkq addq, changeq;
struct pfr_ktable *p, *q, *r, key;
int i, rv, xadd = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&addq);
SLIST_INIT(&changeq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
senderr(EFAULT);
if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
flags & PFR_FLAG_USERIOCTL))
}
int
-pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
+pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
struct pfr_ktableworkq workq;
struct pfr_ktable *p, *q, key;
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
}
int
-pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
+pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
int flags)
{
struct pfr_ktable *p;
continue;
if (n-- <= 0)
continue;
- if (COPYOUT(&p->pfrkt_t, tbl++, sizeof (*tbl), flags))
+ if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags))
return (EFAULT);
+ tbl += sizeof (p->pfrkt_t);
}
if (n) {
printf("pfr_get_tables: corruption detected (%d).\n", n);
}
int
-pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
+pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
int flags)
{
struct pfr_ktable *p;
struct pfr_ktableworkq workq;
int n, nn;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
continue;
if (n-- <= 0)
continue;
- if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof (*tbl), flags)) {
+ if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) {
return (EFAULT);
}
+ tbl += sizeof (p->pfrkt_ts);
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
}
if (flags & PFR_FLAG_CLSTATS)
}
int
-pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
+pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
struct pfr_ktableworkq workq;
struct pfr_ktable *p, key;
int i, xzero = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
PFR_FLAG_ADDRSTOO);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
if (pfr_validate_table(&key.pfrkt_t, 0, 0))
return (EINVAL);
}
int
-pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
+pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
int *nchange, int *ndel, int flags)
{
struct pfr_ktableworkq workq;
(setflag & clrflag))
return (EINVAL);
SLIST_INIT(&workq);
- for (i = 0; i < size; i++) {
- if (COPYIN(tbl+i, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
+ for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
+ if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
return (EFAULT);
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
}
int
-pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
int *nadd, int *naddr, u_int32_t ticket, int flags)
{
struct pfr_ktableworkq tableq;
return (ENOMEM);
}
SLIST_INIT(&addrq);
- for (i = 0; i < size; i++) {
- if (COPYIN(addr+i, &ad, sizeof (ad), flags))
+ for (i = 0; i < size; i++, addr += sizeof (ad)) {
+ if (COPYIN(addr, &ad, sizeof (ad), flags))
senderr(EFAULT);
if (pfr_validate_addr(&ad))
senderr(EINVAL);
struct pfr_ktableworkq workq;
struct pf_ruleset *rs;
int xadd = 0, xchange = 0;
- u_int64_t tzero = pf_time_second();
+ u_int64_t tzero = pf_calendar_time_second();
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
return (0);
}
-void
+static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
struct pfr_ktable *shadow = kt->pfrkt_shadow;
pfr_setflags_ktable(kt, nflags);
}
-int
+static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
int i;
* Rewrite anchors referenced by tables to remove slashes
* and check for validity.
*/
-int
+static int
pfr_fix_anchor(char *anchor)
{
size_t siz = MAXPATHLEN;
return (0);
}
-int
+static int
pfr_table_count(struct pfr_table *filter, int flags)
{
struct pf_ruleset *rs;
return (pf_main_ruleset.tables);
}
-int
+static int
pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
{
if (flags & PFR_FLAG_ALLRSETS)
return (0);
}
-void
+static void
pfr_insert_ktables(struct pfr_ktableworkq *workq)
{
struct pfr_ktable *p;
pfr_insert_ktable(p);
}
-void
+static void
pfr_insert_ktable(struct pfr_ktable *kt)
{
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
-void
+static void
pfr_setflags_ktables(struct pfr_ktableworkq *workq)
{
struct pfr_ktable *p, *q;
}
}
-void
+static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
struct pfr_kentryworkq addrq;
kt->pfrkt_flags = newf;
}
-void
+static void
pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
{
struct pfr_ktable *p;
pfr_clstats_ktable(p, tzero, recurse);
}
-void
+static void
pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
{
struct pfr_kentryworkq addrq;
kt->pfrkt_tzero = tzero;
}
-struct pfr_ktable *
+static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
struct pfr_ktable *kt;
return (kt);
}
-void
+static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
struct pfr_ktable *p, *q;
}
}
-void
+static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
struct pfr_kentryworkq addrq;
pool_put(&pfr_ktable_pl, kt);
}
-int
+static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
int d;
return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}
-struct pfr_ktable *
+static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
kt = pfr_lookup_table(&tbl);
if (kt == NULL) {
- kt = pfr_create_ktable(&tbl, pf_time_second(), 1);
+ kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
if (kt == NULL)
return (NULL);
if (ac != NULL) {
}
}
-struct pfr_kentry *
+static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
struct pfr_walktree w;
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/* $apfw: pfvar.h,v 1.12 2008/08/27 00:01:32 jhw Exp $ */
+/* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/* $OpenBSD: pfvar.h,v 1.259 2007/12/02 12:08:04 pascoe Exp $ */
/*
#define _NET_PFVAR_H_
#ifdef PRIVATE
+/*
+ * XXX
+ * XXX Private interfaces. Do not include this file; use pfctl(8) instead.
+ * XXX
+ */
#if PF || !defined(KERNEL)
#ifdef __cplusplus
struct sockaddr_in6 sin6;
};
-struct ip;
-struct ip6_hdr;
-struct tcphdr;
-
#define PF_TCPS_PROXY_SRC ((TCP_NSTATES)+0)
#define PF_TCPS_PROXY_DST ((TCP_NSTATES)+1)
#ifdef MD5_DIGEST_LENGTH
#if PF_MD5_DIGEST_LENGTH != MD5_DIGEST_LENGTH
#error
-#endif
-#endif
+#endif /* PF_MD5_DIGEST_LENGTH != MD5_DIGEST_LENGTH */
+#endif /* MD5_DIGEST_LENGTH */
+#ifdef KERNEL
+struct ip;
+struct ip6_hdr;
+struct tcphdr;
#ifndef NO_APPLE_EXTENSIONS
struct pf_grev1_hdr;
struct pf_esp_hdr;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
+#endif /* KERNEL */
enum { PF_INOUT, PF_IN, PF_OUT };
enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT,
#ifndef NO_APPLE_EXTENSIONS
PFTM_GREv1_FIRST_PACKET, PFTM_GREv1_INITIATING, PFTM_GREv1_ESTABLISHED,
PFTM_ESP_FIRST_PACKET, PFTM_ESP_INITIATING, PFTM_ESP_ESTABLISHED,
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
PFTM_OTHER_FIRST_PACKET, PFTM_OTHER_SINGLE,
PFTM_OTHER_MULTIPLE, PFTM_FRAG, PFTM_INTERVAL,
PFTM_ADAPTIVE_START, PFTM_ADAPTIVE_END, PFTM_SRC_NODE,
#define PFTM_ESP_FIRST_PACKET_VAL 120
#define PFTM_ESP_INITIATING_VAL 30
#define PFTM_ESP_ESTABLISHED_VAL 900
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
#define PFTM_OTHER_FIRST_PACKET_VAL 60 /* First packet */
#define PFTM_OTHER_SINGLE_VAL 30 /* Unidirectional */
#define PFTM_OTHER_MULTIPLE_VAL 60 /* Bidirectional */
enum { PF_LIMIT_STATES,
#ifndef NO_APPLE_EXTENSIONS
PF_LIMIT_APP_STATES,
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS,
PF_LIMIT_TABLES, PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX };
#define PF_POOL_IDMASK 0x0f
u_int32_t rtlabel;
} v;
union {
+#ifdef KERNEL
struct pfi_dynaddr *dyn __attribute__((aligned(8)));
struct pfr_ktable *tbl __attribute__((aligned(8)));
+#else /* !KERNEL */
+ void *dyn __attribute__((aligned(8)));
+ void *tbl __attribute__((aligned(8)));
+#endif /* !KERNEL */
int dyncnt __attribute__((aligned(8)));
int tblcnt __attribute__((aligned(8)));
} p __attribute__((aligned(8)));
u_int16_t call_id;
u_int32_t spi;
};
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
#ifdef KERNEL
struct pfi_dynaddr {
#endif /* INET6 */
#endif /* INET */
-#else
+#else /* !KERNEL */
#define PF_INET_INET6
-#endif /* KERNEL */
+#endif /* !KERNEL */
/* Both IPv4 and IPv6 */
#ifdef PF_INET_INET6
#endif /* PF_INET6_ONLY */
#endif /* PF_INET_INET6 */
+#ifdef KERNEL
#define PF_MISMATCHAW(aw, x, af, neg, ifp) \
( \
(((aw)->type == PF_ADDR_NOROUTE && \
&(aw)->v.a.mask, (x), (af))))) != \
(neg) \
)
-
+#endif /* KERNEL */
struct pf_rule_uid {
uid_t uid[2];
#ifndef NO_APPLE_EXTENSIONS
union pf_rule_xport xport;
u_int8_t neg;
-#else
+#else /* NO_APPLE_EXTENSIONS */
u_int16_t port[2];
u_int8_t neg;
u_int8_t port_op;
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
};
struct pf_pooladdr {
struct pf_addr_wrap addr;
TAILQ_ENTRY(pf_pooladdr) entries;
#if !defined(__LP64__)
- u_int32_t _pad[2];
+ u_int32_t _pad[2];
#endif /* !__LP64__ */
char ifname[IFNAMSIZ];
+#ifdef KERNEL
struct pfi_kif *kif __attribute__((aligned(8)));
+#else /* !KERNEL */
+ void *kif __attribute__((aligned(8)));
+#endif /* !KERNEL */
};
TAILQ_HEAD(pf_palist, pf_pooladdr);
struct pf_pool {
struct pf_palist list;
#if !defined(__LP64__)
- u_int32_t _pad[2];
+ u_int32_t _pad[2];
#endif /* !__LP64__ */
+#ifdef KERNEL
struct pf_pooladdr *cur __attribute__((aligned(8)));
+#else /* !KERNEL */
+ void *cur __attribute__((aligned(8)));
+#endif /* !KERNEL */
struct pf_poolhashkey key __attribute__((aligned(8)));
struct pf_addr counter;
int tblidx;
TAILQ_ENTRY(pf_rule) entries;
#if !defined(__LP64__)
- u_int32_t _pad[2];
+ u_int32_t _pad[2];
#endif /* !__LP64__ */
struct pf_pool rpool;
u_int64_t packets[2];
u_int64_t bytes[2];
+#ifdef KERNEL
struct pfi_kif *kif __attribute__((aligned(8)));
+#else /* !KERNEL */
+ void *kif __attribute__((aligned(8)));
+#endif /* !KERNEL */
struct pf_anchor *anchor __attribute__((aligned(8)));
+#ifdef KERNEL
struct pfr_ktable *overload_tbl __attribute__((aligned(8)));
+#else /* !KERNEL */
+ void *overload_tbl __attribute__((aligned(8)));
+#endif /* !KERNEL */
pf_osfp_t os_fingerprint __attribute__((aligned(8)));
u_int8_t proto_variant;
u_int8_t extfilter; /* Filter mode [PF_EXTFILTER_xxx] */
u_int8_t extmap; /* Mapping mode [PF_EXTMAP_xxx] */
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
};
/* rule flags */
PF_EXTFILTER_AD, /* Address-dependent filtering */
PF_EXTFILTER_EI /* Endpoint-independent filtering */
};
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
struct pf_threshold {
u_int32_t limit;
struct pf_addr addr;
struct pf_addr raddr;
union pf_rule_ptr rule;
+#ifdef KERNEL
struct pfi_kif *kif;
+#else /* !KERNEL */
+ void *kif;
+#endif /* !KERNEL */
u_int64_t bytes[2];
u_int64_t packets[2];
u_int32_t states;
#define PFSNODE_HIWAT 10000 /* default source node table size */
+#ifdef KERNEL
struct pf_state_scrub {
struct timeval pfss_last; /* time received last packet */
u_int32_t pfss_tsecr; /* last echoed timestamp */
u_int8_t pad;
u_int32_t pfss_ts_mod; /* timestamp modulation */
};
+#endif /* KERNEL */
#ifndef NO_APPLE_EXTENSIONS
union pf_state_xport {
struct pf_addr addr;
union pf_state_xport xport;
};
-#else
+#else /* NO_APPLE_EXTENSIONS */
struct pf_state_host {
struct pf_addr addr;
u_int16_t port;
u_int16_t pad;
};
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
+#ifdef KERNEL
struct pf_state_peer {
u_int32_t seqlo; /* Max sequence number sent */
u_int32_t seqhi; /* Max the other end ACKd + win */
};
TAILQ_HEAD(pf_state_queue, pf_state);
+#endif /* KERNEL */
#ifndef NO_APPLE_EXTENSIONS
#ifdef KERNEL
struct pf_state *grev1_state;
};
+struct pf_grev1_state {
+ struct pf_state *pptp_state;
+};
+
struct pf_ike_state {
u_int64_t cookie;
};
pf_app_compare compare_ext_gwy;
union {
struct pf_pptp_state pptp;
+ struct pf_grev1_state grev1;
struct pf_ike_state ike;
} u;
};
#endif /* KERNEL */
#define PF_GRE_PPTP_VARIANT 0x01
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
+#ifdef KERNEL
/* keep synced with struct pf_state, used in RB_FIND */
struct pf_state_key_cmp {
struct pf_state_host lan;
#ifndef NO_APPLE_EXTENSIONS
u_int8_t proto_variant;
struct pf_app_state *app_state;
-#else
+#else /* NO_APPLE_EXTENSIONS */
u_int8_t pad;
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
};
TAILQ_HEAD(pf_statelist, pf_state);
#ifndef NO_APPLE_EXTENSIONS
u_int8_t proto_variant;
struct pf_app_state *app_state;
-#else
+#else /* NO_APPLE_EXTENSIONS */
u_int8_t pad;
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
RB_ENTRY(pf_state_key) entry_lan_ext;
RB_ENTRY(pf_state_key) entry_ext_gwy;
u_int32_t creatorid;
u_int32_t pad;
};
+#endif /* KERNEL */
struct hook_desc;
TAILQ_HEAD(hook_desc_head, hook_desc);
+#ifdef KERNEL
struct pf_state {
u_int64_t id;
u_int32_t creatorid;
struct pf_addr rt_addr;
#ifndef NO_APPLE_EXTENSIONS
struct hook_desc_head unlink_hooks;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
struct pf_state_key *state_key;
struct pfi_kif *kif;
struct pfi_kif *rt_kif;
u_int8_t allow_opts;
u_int8_t timeout;
u_int8_t sync_flags;
+};
+#endif /* KERNEL */
+
#define PFSTATE_NOSYNC 0x01
#define PFSTATE_FROMSYNC 0x02
#define PFSTATE_STALE 0x04
-};
#define __packed __attribute__((__packed__))
#ifndef NO_APPLE_EXTENSIONS
union pf_state_xport xport;
u_int16_t pad[2];
-#else
+#else /* NO_APPLE_EXTENSIONS */
u_int16_t port;
u_int16_t pad[3];
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
} __packed;
struct pfsync_state_peer {
#if !defined(__LP64__)
u_int32_t _pad[2];
#endif /* !__LP64__ */
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
u_int32_t rule;
u_int32_t anchor;
u_int32_t nat_rule;
u_int32_t creatorid;
#ifndef NO_APPLE_EXTENSIONS
u_int16_t tag;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
sa_family_t af;
u_int8_t proto;
u_int8_t direction;
u_int8_t updates;
#ifndef NO_APPLE_EXTENSIONS
u_int8_t proto_variant;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
} __packed;
#define PFSYNC_FLAG_COMPRESS 0x01
#define PFSYNC_FLAG_SRCNODE 0x04
#define PFSYNC_FLAG_NATSRCNODE 0x08
+#ifdef KERNEL
/* for copies to/from userland via pf_ioctl() */
#define pf_state_peer_to_pfsync(s, d) do { \
(d)->seqlo = (s)->seqlo; \
(d)->scrub->pfss_ts_mod = (s)->scrub.pfss_ts_mod; \
} \
} while (0)
+#endif /* KERNEL */
#define pf_state_counter_to_pfsync(s, d) do { \
d[0] = (s>>32)&0xffffffff; \
struct pfr_astats {
struct pfr_addr pfras_a;
+#if !defined(__LP64__)
+ u_int32_t _pad;
+#endif /* !__LP64__ */
u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
u_int64_t pfras_tzero;
u_int64_t pfrts_tzero;
int pfrts_cnt;
int pfrts_refcnt[PFR_REFCNT_MAX];
+#if !defined(__LP64__)
+ u_int32_t _pad;
+#endif /* !__LP64__ */
};
#define pfrts_name pfrts_t.pfrt_name
#define pfrts_flags pfrts_t.pfrt_flags
+#ifdef KERNEL
SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
struct pfr_kentry {
struct radix_node pfrke_node[2];
#define pfrkt_tzero pfrkt_ts.pfrts_tzero
RB_HEAD(pf_state_tree_lan_ext, pf_state_key);
-#ifdef KERNEL
RB_PROTOTYPE_SC(__private_extern__, pf_state_tree_lan_ext, pf_state_key,
entry_lan_ext, pf_state_compare_lan_ext);
-#else /* !KERNEL */
-RB_PROTOTYPE(pf_state_tree_lan_ext, pf_state_key, entry_lan_ext,
- pf_state_compare_lan_ext);
-#endif /* !KERNEL */
RB_HEAD(pf_state_tree_ext_gwy, pf_state_key);
-#ifdef KERNEL
RB_PROTOTYPE_SC(__private_extern__, pf_state_tree_ext_gwy, pf_state_key,
entry_ext_gwy, pf_state_compare_ext_gwy);
-#else /* !KERNEL */
-RB_PROTOTYPE(pf_state_tree_ext_gwy, pf_state_key, entry_ext_gwy,
- pf_state_compare_ext_gwy);
-#endif /* !KERNEL */
RB_HEAD(pfi_ifhead, pfi_kif);
/* state tables */
-#ifdef KERNEL
__private_extern__ struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
__private_extern__ struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
-#else /* !KERNEL */
-extern struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
-extern struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
-#endif /* !KERNEL */
/* keep synced with pfi_kif, used in RB_FIND */
struct pfi_kif_cmp {
PFI_KIF_REF_RULE
};
+struct pfi_uif {
+#else /* !KERNEL */
+struct pfi_kif {
+#endif /* !KERNEL */
+ char pfik_name[IFNAMSIZ];
+ u_int64_t pfik_packets[2][2][2];
+ u_int64_t pfik_bytes[2][2][2];
+ u_int64_t pfik_tzero;
+ int pfik_flags;
+ int pfik_states;
+ int pfik_rules;
+#if !defined(__LP64__)
+ u_int32_t _pad;
+#endif /* !__LP64__ */
+};
+
#define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */
#ifdef KERNEL
#ifndef NO_APPLE_EXTENSIONS
struct pf_grev1_hdr *grev1;
struct pf_esp_hdr *esp;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
void *any;
} hdr;
struct pf_addr baddr; /* address before translation */
#ifndef NO_APPLE_EXTENSIONS
struct mbuf *mp;
int lmw; /* lazy writable offset */
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
struct pf_mtag *pf_mtag;
u_int16_t *ip_sum;
u_int32_t p_len; /* total length of payload */
u_int8_t tos;
#ifndef NO_APPLE_EXTENSIONS
u_int8_t proto_variant;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
};
#endif /* KERNEL */
#define PFESPS_NSTATES 3 /* number of state levels */
#define PFESPS_NAMES { "NO_TRAFFIC", "INITIATING", "ESTABLISHED", NULL }
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
/* Other protocol state enumeration */
#define PFOTHERS_NO_TRAFFIC 0
#define SCNT_SRC_NODE_REMOVALS 2
#define SCNT_MAX 3
+#ifdef KERNEL
#define ACTION_SET(a, x) \
do { \
if ((a) != NULL) \
if (x < PFRES_MAX) \
pf_status.counters[x]++; \
} while (0)
+#endif /* KERNEL */
struct pf_status {
u_int64_t counters[PFRES_MAX];
u_int8_t proto;
u_int8_t proto_variant;
u_int8_t direction;
-#else
+#else /* NO_APPLE_EXTENSIONS */
u_int16_t sport;
u_int16_t dport;
u_int16_t rsport;
sa_family_t af;
u_int8_t proto;
u_int8_t direction;
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
};
struct pfioc_state {
u_int8_t neg;
union pf_rule_xport xport;
};
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
struct pfioc_state_kill {
/* XXX returns the number of states killed in psk_af */
u_int8_t _pad;
struct pfioc_state_addr_kill psk_src;
struct pfioc_state_addr_kill psk_dst;
-#else
+#else /* NO_APPLE_EXTENSIONS */
int psk_proto;
struct pf_rule_addr psk_src;
struct pf_rule_addr psk_dst;
-#endif
+#endif /* NO_APPLE_EXTENSIONS */
char psk_ifname[IFNAMSIZ];
};
#define ps_states ps_u.psu_states
};
+#ifdef KERNEL
+struct pfioc_states_32 {
+ int ps_len;
+ union {
+ user32_addr_t psu_buf;
+ user32_addr_t psu_states;
+ } ps_u __attribute__((aligned(8)));
+};
+
+struct pfioc_states_64 {
+ int ps_len;
+ union {
+ user64_addr_t psu_buf;
+ user64_addr_t psu_states;
+ } ps_u __attribute__((aligned(8)));
+};
+#endif /* KERNEL */
+
struct pfioc_src_nodes {
int psn_len;
union {
- caddr_t psu_buf;
+ caddr_t psu_buf;
struct pf_src_node *psu_src_nodes;
} psn_u __attribute__((aligned(8)));
#define psn_buf psn_u.psu_buf
#define psn_src_nodes psn_u.psu_src_nodes
};
+#ifdef KERNEL
+struct pfioc_src_nodes_32 {
+ int psn_len;
+ union {
+ user32_addr_t psu_buf;
+ user32_addr_t psu_src_nodes;
+ } psn_u __attribute__((aligned(8)));
+};
+
+struct pfioc_src_nodes_64 {
+ int psn_len;
+ union {
+ user64_addr_t psu_buf;
+ user64_addr_t psu_src_nodes;
+ } psn_u __attribute__((aligned(8)));
+};
+#endif /* KERNEL */
+
struct pfioc_if {
char ifname[IFNAMSIZ];
};
int rs_num;
char anchor[MAXPATHLEN];
u_int32_t ticket;
- } *array __attribute__((aligned(8)));
+ } *array __attribute__((aligned(8)));
+};
+
+#ifdef KERNEL
+struct pfioc_trans_32 {
+ int size; /* number of elements */
+ int esize; /* size of each element in bytes */
+ user32_addr_t array __attribute__((aligned(8)));
+};
+
+struct pfioc_trans_64 {
+ int size; /* number of elements */
+ int esize; /* size of each element in bytes */
+ user64_addr_t array __attribute__((aligned(8)));
};
+#endif /* KERNEL */
#define PFR_FLAG_ATOMIC 0x00000001
#define PFR_FLAG_DUMMY 0x00000002
#define PFR_FLAG_ALLMASK 0x0000007F
#ifdef KERNEL
#define PFR_FLAG_USERIOCTL 0x10000000
-#endif
+#endif /* KERNEL */
struct pfioc_table {
struct pfr_table pfrio_table;
#define pfrio_setflag pfrio_size2
#define pfrio_clrflag pfrio_nadd
+#ifdef KERNEL
+struct pfioc_table_32 {
+ struct pfr_table pfrio_table;
+ user32_addr_t pfrio_buffer __attribute__((aligned(8)));
+ int pfrio_esize __attribute__((aligned(8)));
+ int pfrio_size;
+ int pfrio_size2;
+ int pfrio_nadd;
+ int pfrio_ndel;
+ int pfrio_nchange;
+ int pfrio_flags;
+ u_int32_t pfrio_ticket;
+};
+
+struct pfioc_table_64 {
+ struct pfr_table pfrio_table;
+ user64_addr_t pfrio_buffer __attribute__((aligned(8)));
+ int pfrio_esize __attribute__((aligned(8)));
+ int pfrio_size;
+ int pfrio_size2;
+ int pfrio_nadd;
+ int pfrio_ndel;
+ int pfrio_nchange;
+ int pfrio_flags;
+ u_int32_t pfrio_ticket;
+};
+#endif /* KERNEL */
+
struct pfioc_iface {
char pfiio_name[IFNAMSIZ];
void *pfiio_buffer __attribute__((aligned(8)));
int pfiio_flags;
};
+#ifdef KERNEL
+struct pfioc_iface_32 {
+ char pfiio_name[IFNAMSIZ];
+ user32_addr_t pfiio_buffer __attribute__((aligned(8)));
+ int pfiio_esize __attribute__((aligned(8)));
+ int pfiio_size;
+ int pfiio_nzero;
+ int pfiio_flags;
+};
+
+struct pfioc_iface_64 {
+ char pfiio_name[IFNAMSIZ];
+ user64_addr_t pfiio_buffer __attribute__((aligned(8)));
+ int pfiio_esize __attribute__((aligned(8)));
+ int pfiio_size;
+ int pfiio_nzero;
+ int pfiio_flags;
+};
+#endif /* KERNEL */
/*
* ioctl operations
#endif /* ALTQ */
#ifndef NO_APPLE_EXTENSIONS
__private_extern__ struct pool pf_app_state_pl;
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
__private_extern__ struct thread *pf_purge_thread;
#ifndef NO_APPLE_EXTENSIONS
__private_extern__ struct mbuf *pf_lazy_makewritable(struct pf_pdesc *,
struct mbuf *, int);
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
__private_extern__ void *pf_pull_hdr(struct mbuf *, int, void *, int,
u_short *, u_short *, sa_family_t);
__private_extern__ void pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t);
#ifndef NO_APPLE_EXTENSIONS
__private_extern__ int pf_match_xport(u_int8_t, u_int8_t, union pf_rule_xport *,
union pf_state_xport *);
-#endif
+#endif /* !NO_APPLE_EXTENSIONS */
__private_extern__ int pf_match_uid(u_int8_t, uid_t, uid_t, uid_t);
__private_extern__ int pf_match_gid(u_int8_t, gid_t, gid_t, gid_t);
char *);
__private_extern__ void pfr_detach_table(struct pfr_ktable *);
__private_extern__ int pfr_clr_tables(struct pfr_table *, int *, int);
-__private_extern__ int pfr_add_tables(struct pfr_table *, int, int *, int);
-__private_extern__ int pfr_del_tables(struct pfr_table *, int, int *, int);
-__private_extern__ int pfr_get_tables(struct pfr_table *, struct pfr_table *,
+__private_extern__ int pfr_add_tables(user_addr_t, int, int *, int);
+__private_extern__ int pfr_del_tables(user_addr_t, int, int *, int);
+__private_extern__ int pfr_get_tables(struct pfr_table *, user_addr_t,
int *, int);
-__private_extern__ int pfr_get_tstats(struct pfr_table *, struct pfr_tstats *,
+__private_extern__ int pfr_get_tstats(struct pfr_table *, user_addr_t,
int *, int);
-__private_extern__ int pfr_clr_tstats(struct pfr_table *, int, int *, int);
-__private_extern__ int pfr_set_tflags(struct pfr_table *, int, int, int, int *,
+__private_extern__ int pfr_clr_tstats(user_addr_t, int, int *, int);
+__private_extern__ int pfr_set_tflags(user_addr_t, int, int, int, int *,
int *, int);
__private_extern__ int pfr_clr_addrs(struct pfr_table *, int *, int);
__private_extern__ int pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *,
u_int64_t);
-__private_extern__ int pfr_add_addrs(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_add_addrs(struct pfr_table *, user_addr_t,
int, int *, int);
-__private_extern__ int pfr_del_addrs(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_del_addrs(struct pfr_table *, user_addr_t,
int, int *, int);
-__private_extern__ int pfr_set_addrs(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_set_addrs(struct pfr_table *, user_addr_t,
int, int *, int *, int *, int *, int, u_int32_t);
-__private_extern__ int pfr_get_addrs(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_get_addrs(struct pfr_table *, user_addr_t,
int *, int);
-__private_extern__ int pfr_get_astats(struct pfr_table *, struct pfr_astats *,
+__private_extern__ int pfr_get_astats(struct pfr_table *, user_addr_t,
int *, int);
-__private_extern__ int pfr_clr_astats(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_clr_astats(struct pfr_table *, user_addr_t,
int, int *, int);
-__private_extern__ int pfr_tst_addrs(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_tst_addrs(struct pfr_table *, user_addr_t,
int, int *, int);
__private_extern__ int pfr_ina_begin(struct pfr_table *, u_int32_t *, int *,
int);
int);
__private_extern__ int pfr_ina_commit(struct pfr_table *, u_int32_t, int *,
int *, int);
-__private_extern__ int pfr_ina_define(struct pfr_table *, struct pfr_addr *,
+__private_extern__ int pfr_ina_define(struct pfr_table *, user_addr_t,
int, int *, int *, u_int32_t, int);
__private_extern__ struct pfi_kif *pfi_all;
__private_extern__ void pfi_dynaddr_remove(struct pf_addr_wrap *);
__private_extern__ void pfi_dynaddr_copyout(struct pf_addr_wrap *);
__private_extern__ void pfi_update_status(const char *, struct pf_status *);
-__private_extern__ int pfi_get_ifaces(const char *, struct pfi_kif *, int *);
+__private_extern__ int pfi_get_ifaces(const char *, user_addr_t, int *);
__private_extern__ int pfi_set_flags(const char *, int);
__private_extern__ int pfi_clear_flags(const char *, int);
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#include <sys/param.h>
+#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
* be done without rt_lock: RTF_GATEWAY, RTF_HOST, RTF_DYNAMIC,
* RTF_DONE, RTF_XRESOLVE, RTF_STATIC, RTF_BLACKHOLE, RTF_ANNOUNCE,
* RTF_USETRAILERS, RTF_WASCLONED, RTF_PINNED, RTF_LOCAL,
- * RTF_BROADCAST, RTF_MULTICAST, RTF_IFSCOPE.
+ * RTF_BROADCAST, RTF_MULTICAST, RTF_IFSCOPE, RTF_IFREF.
*
* rt_key, rt_gateway, rt_ifp, rt_ifa
*
static struct rtentry *rte_alloc(void);
static void rte_free(struct rtentry *);
static void rtfree_common(struct rtentry *, boolean_t);
+#if IFNET_ROUTE_REFCNT
+static void rte_if_ref(struct ifnet *, int);
+#endif /* IFNET_ROUTE_REFCNT */
uint32_t route_generation = 0;
#define RT(r) ((struct rtentry *)r)
#define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST)
+#if IFNET_ROUTE_REFCNT
+SYSCTL_DECL(_net_idle_route);
+
+static int rt_if_idle_expire_timeout = RT_IF_IDLE_EXPIRE_TIMEOUT;
+SYSCTL_INT(_net_idle_route, OID_AUTO, expire_timeout, CTLFLAG_RW,
+ &rt_if_idle_expire_timeout, 0, "Default expiration time on routes for "
+ "interface idle reference counting");
+#endif /* IFNET_ROUTE_REFCNT */
+
/*
* Given a route, determine whether or not it is the non-scoped default
* route; dst typically comes from rt_key(rt) but may be coming from
if (rt_inet_default(rt, rt_key(rt)))
set_primary_ifscope(IFSCOPE_NONE);
+#if IFNET_ROUTE_REFCNT
+ if (rt->rt_if_ref_fn != NULL) {
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ rt->rt_flags &= ~RTF_IFREF;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+
RT_UNLOCK(rt);
/*
RT_ADDREF_LOCKED(*ret_nrt);
}
RT_UNLOCK(*ret_nrt);
+
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Enable interface reference counting for unicast
+ * cloned routes and bump up the reference count.
+ */
+ if (rt->rt_parent != NULL &&
+ !(rt->rt_flags & (RTF_BROADCAST | RTF_MULTICAST))) {
+ rt->rt_if_ref_fn = rte_if_ref;
+ rt->rt_if_ref_fn(rt->rt_ifp, 1);
+ rt->rt_flags |= RTF_IFREF;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
}
/*
* Set the route's ifa.
*/
rtsetifa(rt, ifa);
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Adjust route ref count for the interfaces.
+ */
+ if (rt->rt_if_ref_fn != NULL &&
+ rt->rt_ifp != ifa->ifa_ifp) {
+ rt->rt_if_ref_fn(ifa->ifa_ifp, 1);
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
/*
* And substitute in references to the ifaddr
* we are adding.
return (error);
}
+u_int64_t
+rt_expiry(struct rtentry *rt, u_int64_t base, u_int32_t delta)
+{
+#if IFNET_ROUTE_REFCNT
+ u_int64_t retval;
+
+ /*
+ * If the interface of the route doesn't demand aggressive draining,
+ * return the expiration time based on the caller-supplied delta.
+ * Otherwise use the more aggressive route expiration delta (or
+ * the caller-supplied delta, whichever is less.)
+ */
+ if (rt->rt_ifp == NULL || rt->rt_ifp->if_want_aggressive_drain == 0)
+ retval = base + delta;
+ else
+ retval = base + MIN(rt_if_idle_expire_timeout, delta);
+
+ return (retval);
+#else
+#pragma unused(rt)
+ return (base + delta);
+#endif /* IFNET_ROUTE_REFCNT */
+}
+
static void
rte_lock_init(struct rtentry *rt)
{
zfree(rte_zone, p);
}
+#if IFNET_ROUTE_REFCNT
+static void
+rte_if_ref(struct ifnet *ifp, int cnt)
+{
+ struct kev_msg ev_msg;
+ struct net_event_data ev_data;
+ uint32_t old;
+
+ /* Force cnt to 1 increment/decrement */
+ if (cnt < -1 || cnt > 1)
+ panic("%s: invalid count argument (%d)", __func__, cnt);
+
+ old = atomic_add_32_ov(&ifp->if_route_refcnt, cnt);
+ if (cnt < 0 && old == 0)
+ panic("%s: ifp=%p negative route refcnt!", __func__, ifp);
+
+ /*
+ * The following is done without first holding the ifnet lock,
+ * for performance reasons. The relevant ifnet fields, with
+ * the exception of the if_idle_flags, are never changed
+ * during the lifetime of the ifnet. The if_idle_flags
+ * may possibly be modified, so in the event that the value
+ * is stale because IFRF_IDLE_NOTIFY was cleared, we'd end up
+ * sending the event anyway. This is harmless as it is just
+ * a notification to the monitoring agent in user space, and
+ * it is expected to check via SIOCGIFGETRTREFCNT again anyway.
+ */
+ if ((ifp->if_idle_flags & IFRF_IDLE_NOTIFY) && cnt < 0 && old == 1) {
+ bzero(&ev_msg, sizeof (ev_msg));
+ bzero(&ev_data, sizeof (ev_data));
+
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_DL_SUBCLASS;
+ ev_msg.event_code = KEV_DL_IF_IDLE_ROUTE_REFCNT;
+
+ strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
+
+ ev_data.if_family = ifp->if_family;
+ ev_data.if_unit = ifp->if_unit;
+ ev_msg.dv[0].data_length = sizeof (struct net_event_data);
+ ev_msg.dv[0].data_ptr = &ev_data;
+
+ kev_post_msg(&ev_msg);
+ }
+}
+#endif /* IFNET_ROUTE_REFCNT */
+
static inline struct rtentry *
rte_alloc_debug(void)
{
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */
+#ifdef KERNEL_PRIVATE
+/*
+ * New expiry value (in seconds) when dealing with interfaces which implement
+ * the if_want_aggressive_drain behavior. Otherwise the event mechanism wouldn't
+ * fire quickly enough to produce any significant gain in performance.
+ */
+#define RT_IF_IDLE_EXPIRE_TIMEOUT 30
+#define RT_IF_IDLE_DRAIN_INTERVAL 10
+#endif /* KERNEL_PRIVATE */
+
/*
* We distinguish between routes to hosts and routes to networks,
* preferring the former if available. For each route we infer
* See bsd/net/route.c for synchronization notes.
*/
decl_lck_mtx_data(, rt_lock); /* lock for routing entry */
+#if IFNET_ROUTE_REFCNT
+ void (*rt_if_ref_fn)(struct ifnet *, int); /* interface ref func */
+#endif /* IFNET_ROUTE_REFCNT */
};
#endif /* KERNEL_PRIVATE */
#define RTF_MULTICAST 0x800000 /* route represents a mcast address */
#define RTF_IFSCOPE 0x1000000 /* has valid interface scope */
#define RTF_CONDEMNED 0x2000000 /* defunct; no longer modifiable */
- /* 0x4000000 and up unassigned */
+#define RTF_IFREF 0x4000000 /* route holds a ref to interface */
+ /* 0x8000000 and up unassigned */
/*
* Routing statistics.
extern void rt_unlock(struct rtentry *);
extern struct sockaddr *rtm_scrub_ifscope(int, struct sockaddr *,
struct sockaddr *, struct sockaddr_storage *);
+extern u_int64_t rt_expiry(struct rtentry *, u_int64_t, u_int32_t);
+#if IFNET_ROUTE_REFCNT
+extern void rt_aggdrain(int);
+#endif /* IFNET_ROUTE_REFCNT */
#endif /* KERNEL_PRIVATE */
#endif
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <net/if.h>
#include <net/route.h>
+#include <net/dlil.h>
#include <net/raw_cb.h>
#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/in_arp.h>
+#include <netinet6/nd6.h>
#include <machine/spl.h>
extern struct rtstat rtstat;
extern int check_routeselfref;
+extern struct domain routedomain;
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
static void rt_setmetrics(u_int32_t, struct rt_metrics *, struct rt_metrics *);
static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
struct sockaddr *, unsigned int);
+#if IFNET_ROUTE_REFCNT
+static void rt_drainall(void);
+#endif /* IFNET_ROUTE_REFCNT */
#define SIN(sa) ((struct sockaddr_in *)(size_t)(sa))
#define ifaaddr info.rti_info[RTAX_IFA]
#define brdaddr info.rti_info[RTAX_BRD]
+SYSCTL_NODE(_net, OID_AUTO, idle, CTLFLAG_RW, 0, "idle network monitoring");
+
+#if IFNET_ROUTE_REFCNT
+static struct timeval last_ts;
+
+SYSCTL_NODE(_net_idle, OID_AUTO, route, CTLFLAG_RW, 0, "idle route monitoring");
+
+static int rt_if_idle_drain_interval = RT_IF_IDLE_DRAIN_INTERVAL;
+SYSCTL_INT(_net_idle_route, OID_AUTO, drain_interval, CTLFLAG_RW,
+ &rt_if_idle_drain_interval, 0, "Default interval for draining "
+ "routes when doing interface idle reference counting.");
+#endif /* IFNET_ROUTE_REFCNT */
+
/*
* It really doesn't make any sense at all for this code to share much
* with raw_usrreq.c, since its functionality is so restricted. XXX
if (oifa && oifa->ifa_rtrequest)
oifa->ifa_rtrequest(RTM_DELETE, rt, Gate);
rtsetifa(rt, ifa);
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Adjust route ref count for the interfaces.
+ */
+ if (rt->rt_if_ref_fn != NULL && rt->rt_ifp != ifp) {
+ rt->rt_if_ref_fn(ifp, 1);
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
rt->rt_ifp = ifp;
/*
* If this is the (non-scoped) default route, record
return 0;
}
+#if IFNET_ROUTE_REFCNT
+/*
+ * Called from pfslowtimo(), protected by domain_proto_mtx
+ */
+static void
+rt_drainall(void)
+{
+ struct timeval delta_ts, current_ts;
+
+ /*
+	 * This test is done without holding rnh_lock; in the event that
+	 * we read a stale value, it will only cause an extra (or missed)
+ * drain and is therefore harmless.
+ */
+ if (ifnet_aggressive_drainers == 0) {
+ if (timerisset(&last_ts))
+ timerclear(&last_ts);
+ return;
+ }
+
+	microuptime(&current_ts);
+	timersub(&current_ts, &last_ts, &delta_ts);
+
+ if (delta_ts.tv_sec >= rt_if_idle_drain_interval) {
+ timerclear(&last_ts);
+
+ in_rtqdrain(); /* protocol cloned routes: INET */
+ in6_rtqdrain(); /* protocol cloned routes: INET6 */
+ in_arpdrain(NULL); /* cloned routes: ARP */
+ nd6_drain(NULL); /* cloned routes: ND6 */
+
+ last_ts.tv_sec = current_ts.tv_sec;
+ last_ts.tv_usec = current_ts.tv_usec;
+ }
+}
+
+void
+rt_aggdrain(int on)
+{
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (on)
+ routedomain.dom_protosw->pr_flags |= PR_AGGDRAIN;
+ else
+ routedomain.dom_protosw->pr_flags &= ~PR_AGGDRAIN;
+}
+#endif /* IFNET_ROUTE_REFCNT */
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
/*
* Definitions of protocols supported in the ROUTE domain.
*/
-
-extern struct domain routedomain; /* or at least forward */
-
static struct protosw routesw[] = {
{ SOCK_RAW, &routedomain, 0, PR_ATOMIC|PR_ADDR,
0, route_output, raw_ctlinput, 0,
0,
- raw_init, 0, 0, 0,
+ raw_init, 0, 0,
+#if IFNET_ROUTE_REFCNT
+ rt_drainall,
+#else
+ 0,
+#endif /* IFNET_ROUTE_REFCNT */
0,
&route_usrreqs,
0, 0, 0,
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#endif
}
-static struct route igmprt;
-
static void
igmp_sendpkt(struct in_multi *inm, int type, uint32_t addr)
{
struct igmp *igmp;
struct ip *ip;
struct ip_moptions imo;
+ struct route ro;
MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
if (m == NULL)
* XXX
* Do we have to worry about reentrancy here? Don't think so.
*/
- ip_output(m, router_alert, &igmprt, 0, &imo, NULL);
+ bzero(&ro, sizeof (ro));
+ (void) ip_output(m, router_alert, &ro, 0, &imo, NULL);
+ if (ro.ro_rt != NULL) {
+ rtfree(ro.ro_rt);
+ ro.ro_rt = NULL;
+ }
++igmpstat.igps_snd_reports;
}
/* Background socket configuration flags */
#ifdef __APPLE_API_UNSTABLE
-#define TRAFFIC_MGT_SO_BACKGROUND 0x0001
-#define TRAFFIC_MGT_SO_BG_SUPPRESSED 0x0002
+#define TRAFFIC_MGT_SO_BACKGROUND 0x0001 /* background socket */
+#define TRAFFIC_MGT_SO_BG_SUPPRESSED 0x0002 /* currently throttled */
+#define TRAFFIC_MGT_SO_BG_REGULATE 0x0004 /* traffic is regulated */
#endif /* __APPLE_API_UNSTABLE */
/*
}
}
-/*
- * Timeout routine. Age arp_tab entries periodically.
- */
-/* ARGSUSED */
-static void
-arptimer(void *ignored_arg)
+void
+in_arpdrain(void *ignored_arg)
{
#pragma unused (ignored_arg)
struct llinfo_arp *la, *ola;
RT_UNLOCK(rt);
}
lck_mtx_unlock(rnh_lock);
+}
+
+/*
+ * Timeout routine. Age arp_tab entries periodically.
+ */
+/* ARGSUSED */
+static void
+arptimer(void *ignored_arg)
+{
+#pragma unused (ignored_arg)
+ in_arpdrain(NULL);
timeout(arptimer, (caddr_t)0, arpt_prune * hz);
}
*/
rt->rt_expire = 0;
ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)), SDL(gate)->sdl_alen = 6);
- if (useloopback)
+ if (useloopback) {
+#if IFNET_ROUTE_REFCNT
+ /* Adjust route ref count for the interfaces */
+ if (rt->rt_if_ref_fn != NULL &&
+ rt->rt_ifp != lo_ifp) {
+ rt->rt_if_ref_fn(lo_ifp, 1);
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
rt->rt_ifp = lo_ifp;
+ }
}
break;
goto release;
} else {
route->rt_flags |= RTF_REJECT;
- route->rt_rmx.rmx_expire += arpt_down;
+ route->rt_rmx.rmx_expire = rt_expiry(route,
+ route->rt_rmx.rmx_expire, arpt_down);
llinfo->la_asked = 0;
llinfo->la_hold = NULL;
result = EHOSTUNREACH;
lck_mtx_unlock(rnh_lock);
goto respond;
}
+#if IFNET_ROUTE_REFCNT
+ /* Adjust route ref count for the interfaces */
+ if (route->rt_if_ref_fn != NULL &&
+ route->rt_ifp != ifp) {
+ route->rt_if_ref_fn(ifp, 1);
+ route->rt_if_ref_fn(route->rt_ifp, -1);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
/* Change the interface when the existing route is on */
route->rt_ifp = ifp;
rtsetifa(route, &best_ia->ia_ifa);
struct timeval timenow;
getmicrotime(&timenow);
- route->rt_rmx.rmx_expire = timenow.tv_sec + arpt_keep;
+ route->rt_rmx.rmx_expire =
+ rt_expiry(route, timenow.tv_sec, arpt_keep);
}
route->rt_flags &= ~RTF_REJECT;
/*
- * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#endif /* BSD_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
extern void arp_init(void);
+extern void in_arpdrain(void *);
/* arp_lookup_ip is obsolete, use inet_arp_lookup */
extern errno_t arp_lookup_ip(ifnet_t interface,
const struct sockaddr_in *ip_dest, struct sockaddr_dl *ll_dest,
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
getmicrotime(&timenow);
rt->rt_flags |= RTPRF_OURS;
- rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
+ rt->rt_rmx.rmx_expire =
+ rt_expiry(rt, timenow.tv_sec, rtq_reallyold);
}
}
ap->killed++;
}
} else {
- if (ap->updating
- && (rt->rt_rmx.rmx_expire - timenow.tv_sec
- > rtq_reallyold)) {
- rt->rt_rmx.rmx_expire = timenow.tv_sec
- + rtq_reallyold;
+ if (ap->updating &&
+ (unsigned)(rt->rt_rmx.rmx_expire - timenow.tv_sec) >
+ rt_expiry(rt, 0, rtq_reallyold)) {
+ rt->rt_rmx.rmx_expire = rt_expiry(rt,
+ timenow.tv_sec, rtq_reallyold);
}
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
/* Copy the cached route and take an extra reference */
inp_route_copyout(inp, &ro);
+#if PKT_PRIORITY
+ if (soisbackground(so))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
+
socket_unlock(so, 0);
#if CONFIG_MACF_NET
mac_mbuf_label_associate_inpcb(inp, m);
if (error)
break;
- if (background)
- so->so_traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
- else
- so->so_traffic_mgt_flags &= ~TRAFFIC_MGT_SO_BACKGROUND;
+ if (background) {
+ socket_set_traffic_mgt_flags(so,
+ TRAFFIC_MGT_SO_BACKGROUND |
+ TRAFFIC_MGT_SO_BG_REGULATE);
+ } else {
+ socket_clear_traffic_mgt_flags(so,
+ TRAFFIC_MGT_SO_BACKGROUND |
+ TRAFFIC_MGT_SO_BG_REGULATE);
+ }
break;
}
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
register struct inpcb *inp = sotoinpcb(so);
int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
struct ip_out_args ipoa;
+ int error = 0;
/* If socket was bound to an ifindex, tell ip_output about it */
ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
inp->inp_route.ro_rt = NULL;
}
+#if PKT_PRIORITY
+ if (soisbackground(so))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
+
#if CONFIG_MACF_NET
mac_mbuf_label_associate_inpcb(inp, m);
#endif
* to pass the PCB cached route pointer directly to IP and
* the modules beneath it.
*/
- return (ip_output(m, inp->inp_options, &inp->inp_route, flags,
- inp->inp_moptions, &ipoa));
+ error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
+ inp->inp_moptions, &ipoa);
+
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Always discard the cached route for unconnected socket
+ * or if it is a non-unicast route.
+ */
+ if (inp->inp_route.ro_rt != NULL &&
+ ((inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
+ inp->inp_socket == NULL ||
+ inp->inp_socket->so_state != SS_ISCONNECTED)) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+
+ return (error);
}
#if IPFIREWALL
}
#if TRAFFIC_MGT
- if (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) {
+ if (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_REGULATE) {
tcpstat.tcps_bg_rcvtotal++;
/* Take snapshots of pkts recv;
extern u_int32_t kipf_count;
static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *, int,
- struct mbuf *, int, int);
+ struct mbuf *, int, int, int32_t);
static __inline__ u_int16_t
get_socket_id(struct socket * s)
error = tcp_ip_output(so, tp, packetlist,
packchain_listadd, tp_inp_options,
- (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)));
+ (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), 0);
tp->t_flags &= ~TF_SENDINPROG;
}
tp->t_flags |= TF_SENDINPROG;
error = tcp_ip_output(so, tp, packetlist, packchain_listadd,
- tp_inp_options, (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)));
+ tp_inp_options, (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), recwin);
tp->t_flags &= ~TF_SENDINPROG;
}
}
#endif /*IPSEC*/
m->m_pkthdr.socket_id = socket_id;
+#if PKT_PRIORITY
+ if (soisbackground(so))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
error = ip6_output(m,
inp6_pktopts,
&tp->t_inpcb->in6p_route,
error = tcp_ip_output(so, tp, packetlist,
packchain_listadd, tp_inp_options,
- (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)));
+ (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), recwin);
tp->t_flags &= ~TF_SENDINPROG;
if (error) {
packchain_looped++;
tcpstat.tcps_sndtotal++;
- if (recwin > 0 && SEQ_GT(tp->rcv_nxt+recwin, tp->rcv_adv))
- tp->rcv_adv = tp->rcv_nxt + recwin;
- tp->last_ack_sent = tp->rcv_nxt;
- tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);
goto again;
}
}
* Data sent (as far as we can tell).
* If this advertises a larger window than any other segment,
* then remember the size of the advertised window.
- * Any pending ACK has now been sent.
+ * Make sure ACK/DELACK conditions are cleared before
+ * we unlock the socket.
+ * NOTE: for now, this is done in tcp_ip_output for IPv4
*/
- if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
- tp->rcv_adv = tp->rcv_nxt + recwin;
- tp->last_ack_sent = tp->rcv_nxt;
- tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);
+#if INET6
+ if (isipv6) {
+ if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
+ tp->rcv_adv = tp->rcv_nxt + recwin;
+ tp->last_ack_sent = tp->rcv_nxt;
+ tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
+ }
+#endif
KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
if (sendalot && (!tcp_do_newreno || --maxburst))
static int
tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt,
- int cnt, struct mbuf *opt, int flags, int sack_in_progress)
+ int cnt, struct mbuf *opt, int flags, int sack_in_progress, int recwin)
{
int error = 0;
boolean_t chain;
struct inpcb *inp = tp->t_inpcb;
struct ip_out_args ipoa;
struct route ro;
+#if PKT_PRIORITY
+ boolean_t bg = FALSE;
+#endif /* PKT_PRIORITY */
/* If socket was bound to an ifindex, tell ip_output about it */
ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
/* Copy the cached route and take an extra reference */
inp_route_copyout(inp, &ro);
+#if PKT_PRIORITY
+ bg = soisbackground(so);
+#endif /* PKT_PRIORITY */
+
/*
+ * Data sent (as far as we can tell).
+ * If this advertises a larger window than any other segment,
+ * then remember the size of the advertised window.
* Make sure ACK/DELACK conditions are cleared before
* we unlock the socket.
*/
+ if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
+ tp->rcv_adv = tp->rcv_nxt + recwin;
+ tp->last_ack_sent = tp->rcv_nxt;
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
/*
*/
cnt = 0;
}
+#if PKT_PRIORITY
+ if (bg)
+ m_prio_background(pkt);
+#endif /* PKT_PRIORITY */
error = ip_output_list(pkt, cnt, opt, &ro, flags, 0, &ipoa);
if (chain || error) {
/*
return;
}
#endif
+#if PKT_PRIORITY
+ if (tp != NULL && soisbackground(tp->t_inpcb->inp_socket))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
#if INET6
if (isipv6) {
(void)ip6_output(m, NULL, ro6, 0, NULL, NULL, 0);
space = 0;
#if TRAFFIC_MGT
- if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) {
+ if (tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_REGULATE) {
if (tcp_background_io_enabled &&
tp->t_inpcb->inp_socket->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_SUPPRESSED) {
tp->t_flags |= TF_RXWIN0SENT;
goto tpgone;
#if TRAFFIC_MGT
- if (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND &&
+ if (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BG_REGULATE &&
bg_cnt > BG_COUNTER_MAX) {
u_int32_t curr_recvtotal = tcpstat.tcps_rcvtotal;
u_int32_t curr_bg_recvtotal = tcpstat.tcps_bg_rcvtotal;
recv_change = 0;
if (recv_change > background_io_trigger) {
- so->so_traffic_mgt_flags |= TRAFFIC_MGT_SO_BG_SUPPRESSED;
+ socket_set_traffic_mgt_flags(so, TRAFFIC_MGT_SO_BG_SUPPRESSED);
}
tp->tot_recv_snapshot = curr_recvtotal;
if (recv_change < background_io_trigger) {
// Draconian for now: if there is any change at all, keep suppressed
if (!tot_recvdiff) {
- so->so_traffic_mgt_flags &= ~TRAFFIC_MGT_SO_BG_SUPPRESSED;
+ socket_clear_traffic_mgt_flags(so, TRAFFIC_MGT_SO_BG_SUPPRESSED);
tp->t_unacksegs = 0;
(void) tcp_output(tp); // open window
}
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/* Copy the cached route and take an extra reference */
inp_route_copyout(inp, &ro);
+#if PKT_PRIORITY
+ if (soisbackground(so))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
+
socket_unlock(so, 0);
/* XXX jgraessley please look at XXX */
error = ip_output_list(m, 0, inpopts, &ro, soopts, mopts, &ipoa);
inp_route_copyin(inp, &ro);
if (udp_dodisconnect) {
+#if IFNET_ROUTE_REFCNT
+ /* Always discard the cached route for unconnected socket */
+ if (inp->inp_route.ro_rt != NULL) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
in_pcbdisconnect(inp);
inp->inp_laddr = origladdr; /* XXX rehash? */
}
+#if IFNET_ROUTE_REFCNT
+ else if (inp->inp_route.ro_rt != NULL &&
+ (inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))) {
+ /* Always discard non-unicast cached route */
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+
KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
return (error);
abort:
if (udp_dodisconnect) {
- in_pcbdisconnect(inp);
- inp->inp_laddr = origladdr; /* XXX rehash? */
+#if IFNET_ROUTE_REFCNT
+ /* Always discard the cached route for unconnected socket */
+ if (inp->inp_route.ro_rt != NULL) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+ in_pcbdisconnect(inp);
+ inp->inp_laddr = origladdr; /* XXX rehash? */
}
+#if IFNET_ROUTE_REFCNT
+ else if (inp->inp_route.ro_rt != NULL &&
+ (inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))) {
+ /* Always discard non-unicast cached route */
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
release:
m_freem(m);
/*
- * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
getmicrotime(&timenow);
rt->rt_flags |= RTPRF_OURS;
- rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
+ rt->rt_rmx.rmx_expire =
+ rt_expiry(rt, timenow.tv_sec, rtq_reallyold);
}
}
ap->killed++;
}
} else {
- if (ap->updating
- && (rt->rt_rmx.rmx_expire - timenow.tv_sec
- > rtq_reallyold)) {
- rt->rt_rmx.rmx_expire = timenow.tv_sec
- + rtq_reallyold;
+ if (ap->updating &&
+ (unsigned)(rt->rt_rmx.rmx_expire - timenow.tv_sec) >
+ rt_expiry(rt, 0, rtq_reallyold)) {
+ rt->rt_rmx.rmx_expire = rt_expiry(rt,
+ timenow.tv_sec, rtq_reallyold);
}
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
timeout(in6_mtutimo, rock, tvtohz(&atv));
}
-#if 0
void
in6_rtqdrain()
{
struct radix_node_head *rnh = rt_tables[AF_INET6];
struct rtqk_arg arg;
- int s;
arg.found = arg.killed = 0;
arg.rnh = rnh;
arg.nextstop = 0;
arg.draining = 1;
arg.updating = 0;
- s = splnet();
+ lck_mtx_lock(rnh_lock);
rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
- splx(s);
+ lck_mtx_unlock(rnh_lock);
}
-#endif
/*
* Initialize our routing tree.
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
extern void in6_aliasreq_32_to_64(struct in6_aliasreq_32 *,
struct in6_aliasreq_64 *);
extern void in6_ifaddr_init(void);
+extern void in6_rtqdrain(void);
#endif /* KERNEL_PRIVATE */
#endif /* _NETINET6_IN6_VAR_H_ */
return 0;
}
-/*
- * ND6 timer routine to expire default route list and prefix list
- */
void
-nd6_timer(
- __unused void *ignored_arg)
+nd6_drain(__unused void *ignored_arg)
{
struct llinfo_nd6 *ln;
struct nd_defrouter *dr;
case ND6_LLINFO_REACHABLE:
if (ln->ln_expire) {
ln->ln_state = ND6_LLINFO_STALE;
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd6_gctimer);
}
RT_UNLOCK(rt);
break;
goto again;
}
ln->ln_state = ND6_LLINFO_STALE; /* XXX */
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd6_gctimer);
RT_UNLOCK(rt);
break;
pr = pr->ndpr_next;
}
lck_mtx_unlock(nd6_mutex);
+}
+
+/*
+ * ND6 timer routine to expire default route list and prefix list
+ */
+void
+nd6_timer(__unused void *ignored_arg)
+{
+ nd6_drain(NULL);
timeout(nd6_timer, (caddr_t)0, nd6_prune * hz);
}
ln->ln_state = ND6_LLINFO_REACHABLE;
if (ln->ln_expire) {
lck_rw_lock_shared(nd_if_rwlock);
- ln->ln_expire = timenow.tv_sec +
- nd_ifinfo[rt->rt_ifp->if_index].reachable;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd_ifinfo[rt->rt_ifp->if_index].reachable);
lck_rw_done(nd_if_rwlock);
}
done:
SDL(gate)->sdl_alen = ifp->if_addrlen;
}
if (nd6_useloopback) {
+#if IFNET_ROUTE_REFCNT
+ /* Adjust route ref count for the interfaces */
+ if (rt->rt_if_ref_fn != NULL &&
+ rt->rt_ifp != lo_ifp) {
+ rt->rt_if_ref_fn(lo_ifp, 1);
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ }
+#endif /* IFNET_ROUTE_REFCNT */
rt->rt_ifp = lo_ifp; /* XXX */
/*
* Make sure rt_ifa be equal to the ifaddr
* we must set the timer now, although it is actually
* meaningless.
*/
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd6_gctimer);
ln->ln_hold = NULL;
if (m != NULL) {
if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
ln->ln_state < ND6_LLINFO_REACHABLE) {
ln->ln_state = ND6_LLINFO_STALE;
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_gctimer);
}
/*
if (ln->ln_state == ND6_LLINFO_STALE) {
ln->ln_asked = 0;
ln->ln_state = ND6_LLINFO_DELAY;
- ln->ln_expire = timenow.tv_sec + nd6_delay;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_delay);
}
/*
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
extern int nd6_storelladdr(struct ifnet *, struct rtentry *, struct mbuf *,
struct sockaddr *, u_char *);
extern int nd6_need_cache(struct ifnet *);
+extern void nd6_drain(void *);
/* nd6_nbr.c */
extern void nd6_na_input(struct mbuf *, int, int);
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
ln->ln_byhint = 0;
if (ln->ln_expire) {
lck_rw_lock_shared(nd_if_rwlock);
- ln->ln_expire = timenow.tv_sec +
- nd_ifinfo[rt->rt_ifp->if_index].reachable;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd_ifinfo[rt->rt_ifp->if_index].reachable);
lck_rw_done(nd_if_rwlock);
}
} else {
ln->ln_state = ND6_LLINFO_STALE;
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd6_gctimer);
}
if ((ln->ln_router = is_router) != 0) {
/*
*/
if (ln->ln_state == ND6_LLINFO_REACHABLE) {
ln->ln_state = ND6_LLINFO_STALE;
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
+ nd6_gctimer);
}
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
ln->ln_byhint = 0;
if (ln->ln_expire) {
lck_rw_lock_shared(nd_if_rwlock);
- ln->ln_expire = timenow.tv_sec +
- nd_ifinfo[ifp->if_index].reachable;
+ ln->ln_expire =
+ rt_expiry(rt, timenow.tv_sec,
+ nd_ifinfo[ifp->if_index].reachable);
lck_rw_done(nd_if_rwlock);
}
} else {
if (lladdr && llchange) {
ln->ln_state = ND6_LLINFO_STALE;
- ln->ln_expire = timenow.tv_sec + nd6_gctimer;
+ ln->ln_expire = rt_expiry(rt,
+ timenow.tv_sec, nd6_gctimer);
}
}
}
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
in6p->in6p_route.ro_rt = NULL;
}
+#if PKT_PRIORITY
+ if (soisbackground(so))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
+
error = ip6_output(m, optp, &in6p->in6p_route, 0,
in6p->in6p_moptions, &oifp, 0);
+
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Always discard the cached route for unconnected socket
+ * or if it is a multicast route.
+ */
+ if (in6p->in6p_route.ro_rt != NULL &&
+ ((in6p->in6p_route.ro_rt->rt_flags & RTF_MULTICAST) ||
+ in6p->in6p_socket == NULL ||
+ in6p->in6p_socket->so_state != SS_ISCONNECTED)) {
+ rtfree(in6p->in6p_route.ro_rt);
+ in6p->in6p_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
+
if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) {
if (oifp)
icmp6_ifoutstat_inc(oifp, type, code);
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
}
#endif /*IPSEC*/
m->m_pkthdr.socket_id = get_socket_id(in6p->in6p_socket);
+#if PKT_PRIORITY
+ if (soisbackground(in6p->in6p_socket))
+ m_prio_background(m);
+#endif /* PKT_PRIORITY */
error = ip6_output(m, in6p->in6p_outputopts, &in6p->in6p_route,
flags, in6p->in6p_moptions, NULL, 0);
+
+#if IFNET_ROUTE_REFCNT
+ /*
+ * Always discard the cached route for unconnected socket
+ * or if it is a multicast route.
+ */
+ if (in6p->in6p_route.ro_rt != NULL &&
+ ((in6p->in6p_route.ro_rt->rt_flags & RTF_MULTICAST) ||
+ in6p->in6p_socket == NULL ||
+ in6p->in6p_socket->so_state != SS_ISCONNECTED)) {
+ rtfree(in6p->in6p_route.ro_rt);
+ in6p->in6p_route.ro_rt = NULL;
+ }
+#endif /* IFNET_ROUTE_REFCNT */
break;
case AF_INET:
error = EAFNOSUPPORT;
# Installs header file for Apple internal use for kernel extensions -
# $(DSTROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders
PRIVATE_KERNELFILES = \
+ cprotect.h \
decmpfs.h \
disktab.h \
fbt.h \
#define ATTR_FILE_DATAALLOCSIZE 0x00000400
#define ATTR_FILE_RSRCLENGTH 0x00001000
#define ATTR_FILE_RSRCALLOCSIZE 0x00002000
+/* Only used when CONFIG_PROTECT is ON */
+#define ATTR_FILE_PROTECTION_CLASS 0x00004000
-#define ATTR_FILE_VALIDMASK 0x000037FF
-#define ATTR_FILE_SETMASK 0x00000020
+#define ATTR_FILE_VALIDMASK 0x000077FF
+#define ATTR_FILE_SETMASK 0x00004020
#define ATTR_FORK_TOTALSIZE 0x00000001
#define ATTR_FORK_ALLOCSIZE 0x00000002
buf_t buf_geteblk(int);
#ifdef KERNEL_PRIVATE
void buf_setfilter(buf_t, void (*)(buf_t, void *), void *, void **, void **);
+
+/*!
+ @function buf_setcpaddr
+ @abstract Set the address of the cp_entry on a buffer.
+ @param bp Buffer whose cp entry value has to be set.
+ @return void.
+ */
+void buf_setcpaddr(buf_t, void *);
+
+/*!
+ @function buf_getcpaddr
+ @abstract Get the address of the cp_entry on a buffer.
+ @param bp Buffer whose cp entry address is to be returned.
+ @return void * (the stored cp_entry address, or NULL).
+ */
+void *buf_getcpaddr(buf_t);
#endif /* KERNEL_PRIVATE */
int b_validoff; /* Offset in buffer of valid region. */
int b_validend; /* Offset of end of valid region. */
proc_t b_proc; /* Associated proc; NULL if kernel. */
+#if CONFIG_PROTECT
+ struct cprotect *b_cpentry; /* address of cp_entry, to be passed further down */
+#endif /* CONFIG_PROTECT */
#ifdef JOE_DEBUG
void * b_owner;
int b_tag;
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Content protection (per-file encryption) definitions, used on embedded
+ * when CONFIG_PROTECT is enabled.
+ */
+
+#ifndef _SYS_CPROTECT_H_
+#define _SYS_CPROTECT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if KERNEL_PRIVATE
+
+#include <sys/cdefs.h>
+#include <sys/kernel_types.h>
+#include <sys/queue.h>	/* SLIST_ENTRY, used by struct cp_entry below */
+
+/* Content protection classes, stored per file. */
+#define PROTECTION_CLASS_A 1
+#define PROTECTION_CLASS_B 2
+#define PROTECTION_CLASS_C 3
+#define PROTECTION_CLASS_D 4
+#define PROTECTION_CLASS_E 5
+
+#define KEYSIZE 8 /* 8x4 = 32, 32x8 = 256 */
+#define INTEGRITYSIZE 2 /* 2x4 = 8, 8x8 = 64 */
+
+#define LOCKED_STATE 0
+#define UNLOCKED_STATE 1
+
+#define LOCKED_KEYCHAIN 0
+#define UNLOCKED_KEYCHAIN 1
+
+#define CONTENT_PROTECTION_XATTR_NAME "com.apple.system.cprotect"
+
+/* Parenthesized so the expansion stays atomic inside larger expressions. */
+#define kEMBCKeyHandleSpecial (~1)
+
+/* SLIST_HEAD(cp_list, cp_entry) cp_head = LIST_HEAD_INITIALIZER(cp_head); */
+/* struct cp_list *cprotect_list_headp; /\* List head *\/ */
+
+typedef struct cprotect *cprotect_t;
+typedef struct cp_wrap_func *cp_wrap_func_t;
+typedef struct cp_global_state *cp_global_state_t;
+typedef struct cp_xattr *cp_xattr_t;
+
+/* Key wrap/unwrap callbacks, registered via cp_register_wraps(). */
+typedef int wrapper_t(uint32_t properties, void *key_bytes, size_t key_length, void **wrapped_data, uint32_t *wrapped_length);
+typedef int unwrapper_t(uint32_t properties, void *wrapped_data, size_t wrapped_data_length, void **key_bytes, uint32_t *key_length);
+
+/* In-memory per-vnode content protection state. */
+struct cprotect {
+	uint32_t cache_key[KEYSIZE];
+	uint32_t special_data;
+	uint32_t pclass;
+	uint8_t cache_key_flushed;
+	/*
+	 * NOTE(review): original comment said "0 means unlocked, 1 means
+	 * locked", which contradicts LOCKED_STATE (0) / UNLOCKED_STATE (1)
+	 * above — presumably this field holds those constants; confirm.
+	 */
+	uint8_t lock_state;
+};
+
+struct cp_entry {
+	SLIST_ENTRY(cp_entry) cp_list;
+	struct cprotect *protected_entry;
+};
+
+struct cp_wrap_func {
+	wrapper_t *wrapper;
+	unwrapper_t *unwrapper;
+};
+
+struct cp_global_state {
+	uint8_t lock_state;
+	uint8_t wrap_functions_set;
+};
+
+/* On-disk (extended attribute) form of the persistent key material. */
+struct cp_xattr {
+	uint32_t persistent_class;
+	uint8_t persistent_key[32];
+	uint8_t persistent_integrity[8];
+	uint8_t xattr_version;
+};
+
+int cp_create_init(vnode_t, vfs_context_t);
+int cp_key_store_action(int);
+int cp_register_wraps(cp_wrap_func_t);
+struct cprotect *cp_vnode_entry_alloc(void);
+void cp_vnode_entry_init(vnode_t);
+int cp_vnode_entry_init_needed(vnode_t);
+struct cp_xattr * cp_vn_getxattr(vnode_t, vfs_context_t);
+int cp_vn_setxattr(vnode_t, uint32_t, vfs_context_t);
+
+#endif /* KERNEL_PRIVATE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_SYS_CPROTECT_H_ */
#define F_ADDFILESIGS 61 /* add signature from same file (used by dyld for shared libs) */
+#define F_GETPROTECTIONCLASS 62 /* Get the protection class of a file from the EA, returns int */
+#define F_SETPROTECTIONCLASS 63 /* Set the protection class of a file for the EA, requires int */
+
// FS-specific fcntl()'s numbers begin at 0x00010000 and go up
#define FCNTL_FS_SPECIFIC_BASE 0x00010000
int creat(const char *, mode_t) __DARWIN_ALIAS_C(creat);
int fcntl(int, int, ...) __DARWIN_ALIAS_C(fcntl);
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
+
+#ifdef PRIVATE
+#ifndef _FILEPORT_T
+#define _FILEPORT_T
+typedef __darwin_mach_port_t fileport_t;
+#define FILEPORT_NULL ((fileport_t)0)
+#endif /* _FILEPORT_T */
+
+int fileport_makeport(int, fileport_t*);
+int fileport_makefd(fileport_t);
+#endif /* PRIVATE */
int openx_np(const char *, int, filesec_t);
int flock(int, int);
filesec_t filesec_init(void);
#define FG_WINSMSGQ 0x04 /* wait for the fielglob is in msgque */
#define FG_RMMSGQ 0x08 /* the fileglob is being removed from msgqueue */
#define FG_WRMMSGQ 0x10 /* wait for the fileglob to be removed from msgqueue */
+#define FG_PORTMADE 0x20 /* a port was at some point created for this fileglob */
struct fileglob {
LIST_ENTRY(fileglob) f_list;/* list of active files */
#define DBG_DRVFIREWIRE 16 /* FireWire */
#define DBG_DRVINFINIBAND 17 /* Infiniband */
#define DBG_DRVGRAPHICS 18 /* Graphics */
+#define DBG_DRVSD 19 /* Secure Digital */
/* Backwards compatibility */
#define DBG_DRVPOINTING DBG_DRVHID /* OBSOLETE: Use DBG_DRVHID instead */
} jetsam_snapshot_t;
enum {
- kJetsamFlagsFrontmost = (1 << 0),
- kJetsamFlagsKilled = (1 << 1)
+ kJetsamFlagsFrontmost = (1 << 0),
+ kJetsamFlagsKilled = (1 << 1),
+ kJetsamFlagsKilledHiwat = (1 << 2)
};
#endif /* !MACH_KERNEL_PRIVATE */
#ifdef KERNEL
extern void kern_memorystatus_init(void) __attribute__((section("__TEXT, initcode")));
+extern int jetsam_kill_top_proc(void);
extern int kern_memorystatus_wakeup;
extern int kern_memorystatus_level;
+/*
+ * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
#ifndef _KERN_SYS_KERNELTYPES_H_
#define _KERN_SYS_KERNELTYPES_H_
struct __ifmultiaddr;
struct __ifnet_filter;
struct __rtentry;
+struct __if_clone;
typedef struct __ifnet* ifnet_t;
typedef struct __mbuf* mbuf_t;
typedef struct __ifmultiaddr* ifmultiaddr_t;
typedef struct __ifnet_filter* interface_filter_t;
typedef struct __rtentry* route_t;
+typedef struct __if_clone* if_clone_t;
#else /* BSD_BUILD */
typedef struct ifmultiaddr* ifmultiaddr_t;
typedef struct ifnet_filter* interface_filter_t;
typedef struct rtentry* route_t;
+typedef struct if_clone* if_clone_t;
#endif /* KERNEL_PRIVATE */
#endif /* !BSD_BUILD */
*/
extern void mbuf_stats(struct mbuf_stat *stats);
+#ifdef KERNEL_PRIVATE
+/*
+ @enum mbuf_priority_t
+ @abstract Priority of a packet.
+ @discussion Some mbufs represent packets containing application data.
+ The priority of the application data is represented by the
+ mbuf priority, as determined by the system.
+ @constant MBUF_PRIORITY_NORMAL Indicates the packet contains
+ normal priority data.
+ @constant MBUF_PRIORITY_BACKGROUND Indicates the packet contains
+ background priority data.
+ */
+typedef enum {
+ MBUF_PRIORITY_NORMAL = 0,
+ MBUF_PRIORITY_BACKGROUND = 1
+} mbuf_priority_t;
+
+/*
+ @function mbuf_get_priority
+ @discussion Get the priority value of the packet.
+ @param mbuf The mbuf to obtain the priority value from.
+ @result The priority value of the packet.
+ */
+extern mbuf_priority_t mbuf_get_priority(mbuf_t mbuf);
+#endif /* KERNEL_PRIVATE */
/* IF_QUEUE interaction */
@result 0 on success otherwise the errno error.
*/
extern errno_t sock_gettclassopt(socket_t so, void* optval, size_t* optlen);
+
+#ifdef BSD_KERNEL_PRIVATE
+extern void socket_set_traffic_mgt_flags(socket_t so, u_int32_t flags);
+extern void socket_clear_traffic_mgt_flags(socket_t so, u_int32_t flags);
+#endif /* BSD_KERNEL_PRIVATE */
#endif
/*!
*/
struct pf_mtag pf_mtag;
#endif /* PF_PKTHDR */
+#if PKT_PRIORITY
+ u_int32_t prio; /* packet priority */
+#endif /* PKT_PRIORITY */
};
struct m_tag *m_tag_first(struct mbuf *);
struct m_tag *m_tag_next(struct mbuf *, struct m_tag *);
+extern void m_prio_init(struct mbuf *);
+extern void m_prio_background(struct mbuf *);
+
__END_DECLS
#endif /* KERNEL */
#define MNT_NODEV 0x00000010 /* don't interpret special files */
#define MNT_UNION 0x00000020 /* union with underlying filesystem */
#define MNT_ASYNC 0x00000040 /* file system written asynchronously */
+#define MNT_CPROTECT 0x00000080 /* file system supports content protection */
/*
* NFS export related mount flags.
MNT_LOCAL | MNT_QUOTA | \
MNT_ROOTFS | MNT_DOVOLFS | MNT_DONTBROWSE | \
MNT_IGNORE_OWNERSHIP | MNT_AUTOMOUNTED | MNT_JOURNALED | \
- MNT_NOUSERXATTR | MNT_DEFWRITE | MNT_MULTILABEL | MNT_NOATIME)
+ MNT_NOUSERXATTR | MNT_DEFWRITE | MNT_MULTILABEL | MNT_NOATIME | MNT_CPROTECT )
/*
* External filesystem command modifier flags.
* Unmount can use the MNT_FORCE flag.
void vfs_markdependency(mount_t);
vnode_t vfs_vnodecovered(mount_t mp); /* Returns vnode with an iocount that must be released with vnode_put() */
void * vfs_mntlabel(mount_t mp); /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
+void vfs_setunmountpreflight(mount_t mp);
#endif /* KERNEL_PRIVATE */
__END_DECLS
#define P_LSIGNALWAIT 0x00200000
#define P_LRAGE_VNODES 0x00400000
#define P_LREGISTER 0x00800000 /* thread start fns registered */
+#define P_LBACKGROUND 0x01000000
/* Process control state for resource starvation */
#define P_PCTHROTTLE 1
/*
- * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define PR_PROTOLOCK 0x80 /* protocol takes care of it's own locking */
#define PR_PCBLOCK 0x100 /* protocol supports per pcb finer grain locking */
#define PR_DISPOSE 0x200 /* protocol requires late lists disposal */
+#define PR_AGGDRAIN 0x400 /* protocol requires aggressive draining */
/*
* The arguments to usrreq are:
extern void soisconnecting(struct socket *so);
extern void soisdisconnected(struct socket *so);
extern void soisdisconnecting(struct socket *so);
+extern int soisbackground(struct socket *so);
extern int solisten(struct socket *so, int backlog);
extern struct socket *sodropablereq(struct socket *head);
extern struct socket *sonewconn(struct socket *head, int connstatus,
/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define SIOCGIFBOND _IOWR('i', 71, struct ifreq) /* get bond if config */
#define SIOCIFCREATE _IOWR('i', 120, struct ifreq) /* create clone if */
#define SIOCIFDESTROY _IOW('i', 121, struct ifreq) /* destroy clone if */
+#define SIOCIFCREATE2 _IOWR('i', 122, struct ifreq) /* create clone if with data */
#define SIOCSDRVSPEC _IOW('i', 123, struct ifdrv) /* set driver-specific
parameters */
#define SIOCGIFWAKEFLAGS _IOWR('i', 136, struct ifreq) /* get interface wake property flags */
+#ifdef PRIVATE
+#define SIOCGIFGETRTREFCNT _IOWR('i', 137, struct ifreq) /* get interface route refcnt */
+#endif /* PRIVATE */
+
+
#endif /* !_SYS_SOCKIO_H_ */
#endif /* __INIT_SYSENT_C__ */
extern int nsysent;
-#define NUM_SYSENT 430 /* Current number of defined syscalls */
+#define NUM_SYSENT 434 /* Current number of defined syscalls */
/* sy_funnel flags bits */
#define FUNNEL_MASK 0x07f
#define _SYSCALL_RET_ADDR_T 4
#define _SYSCALL_RET_SIZE_T 5
#define _SYSCALL_RET_SSIZE_T 6
+#define _SYSCALL_RET_UINT64_T 7
#endif /* __APPLE_API_PRIVATE */
#endif /* KERNEL_PRIVATE */
struct uthread {
/* syscall parameters, results and catches */
- user_addr_t uu_arg[8]; /* arguments to current system call */
+ u_int64_t uu_arg[8]; /* arguments to current system call */
int *uu_ap; /* pointer to arglist */
int uu_rval[2];
#define UT_PROCEXIT 0x00000200 /* this thread completed the proc exit */
#define UT_RAGE_VNODES 0x00000400 /* rapid age any vnodes created by this thread */
#define UT_BACKGROUND 0x00000800 /* this thread is in background state */
+#define UT_BACKGROUND_TRAFFIC_MGT 0x00001000 /* background traffic is regulated */
#define UT_VFORK 0x02000000 /* thread has vfork children */
#define UT_SETUID 0x04000000 /* thread is settugid() */
ab.actual.fileattr |= ATTR_FILE_RSRCALLOCSIZE;
}
}
+ if (al.fileattr & ATTR_FILE_PROTECTION_CLASS) {
+ }
}
/* diagnostic */
VFS_DEBUG(ctx, vp, "ATTRLIST - XXX device type change not implemented");
goto out;
}
+ if (al.fileattr & ATTR_FILE_PROTECTION_CLASS) {
+ }
/*
* Validate and authorize.
SET(bp->b_flags, B_FUA);
}
+/*
+ * Accessors for the per-buffer content protection entry (b_cpentry).
+ *
+ * Guard with "#if CONFIG_PROTECT", not "#ifdef": the b_cpentry field in
+ * the buf structure is itself guarded by "#if CONFIG_PROTECT", so a build
+ * that defines CONFIG_PROTECT as 0 would otherwise compile these real
+ * accessors against a field that does not exist.
+ */
+#if CONFIG_PROTECT
+/* Return the cp_entry address stashed on the buffer (may be NULL). */
+void *
+buf_getcpaddr(buf_t bp) {
+	return bp->b_cpentry;
+}
+
+/* Stash a cp_entry address on the buffer, to be passed further down. */
+void
+buf_setcpaddr(buf_t bp, void *cp_entry_addr) {
+	bp->b_cpentry = (struct cprotect *) cp_entry_addr;
+}
+
+#else
+/* Content protection compiled out: getter yields NULL, setter is a no-op. */
+void *
+buf_getcpaddr(buf_t bp __unused) {
+	return NULL;
+}
+
+void
+buf_setcpaddr(buf_t bp __unused, void *cp_entry_addr __unused) {
+	return;
+}
+#endif /* CONFIG_PROTECT */
+
errno_t
buf_error(buf_t bp) {
bp->b_resid = 0;
}
-
-
/*
* Read or write a buffer that is not contiguous on disk.
* buffer is marked done/error at the conclusion
bp->b_bcount = 0;
bp->b_dirtyoff = bp->b_dirtyend = 0;
bp->b_validoff = bp->b_validend = 0;
+#ifdef CONFIG_PROTECT
+ bp->b_cpentry = 0;
+#endif
lck_mtx_lock_spin(buf_mtxp);
}
bp->b_bufsize = 0;
bp->b_upl = NULL;
bp->b_vp = vp;
+#ifdef CONFIG_PROTECT
+ bp->b_cpentry = 0;
+#endif
if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
bp->b_dev = vp->v_rdev;
}
if (jnl->flags & JOURNAL_INVALID) {
+ /* Still need to buf_brelse(). Callers assume we consume the bp. */
+ buf_brelse(bp);
return EINVAL;
}
CHECK_JOURNAL(jnl);
if (jnl->flags & JOURNAL_INVALID) {
+ /* Still need to buf_brelse(). Callers assume we consume the bp. */
+ buf_brelse(bp);
return EINVAL;
}
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
+#include <sys/kern_memorystatus.h>
#include <miscfs/fifofs/fifo.h>
#include <string.h>
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void vnode_iterate_setup(mount_t);
-static int vnode_umount_preflight(mount_t, vnode_t, int);
+int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
}
-static int
+int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
vnode_t vp;
}
if (vp == NULL) {
- /*
+ /*
* we've reached the system imposed maximum number of vnodes
* but there isn't a single one available
* wait a bit and then retry... if we can't get a vnode
desiredvnodes, numvnodes, freevnodes, deadvnodes, ragevnodes);
#if CONFIG_EMBEDDED
/*
- * Running out of vnodes tends to make a system unusable. On an
- * embedded system, it's unlikely that the user can do anything
- * about it (or would know what to do, if they could). So panic
- * the system so it will automatically restart (and hopefully we
- * can get a panic log that tells us why we ran out).
+ * Running out of vnodes tends to make a system unusable. Start killing
+ * processes that jetsam knows are killable.
*/
- panic("vnode table is full\n");
+ if (jetsam_kill_top_proc() < 0) {
+ /*
+ * If jetsam can't find any more processes to kill and there
+ * still aren't any free vnodes, panic. Hopefully we'll get a
+ * panic log to tell us why we ran out.
+ */
+ panic("vnode table is full\n");
+ }
+
+ delay_for_interval(1, 1000 * 1000);
+ goto retry;
#endif
+
*vpp = NULL;
return (ENFILE);
}
mount_unlock(mp);
}
+/*
+ * Mark a mount as requiring the unmount preflight check
+ * (MNTK_UNMOUNT_PREFLIGHT) — presumably consulted by the unmount path
+ * to run vnode_umount_preflight() first; confirm against the caller.
+ * Takes and drops the mount lock around the flag update.
+ */
+void
+vfs_setunmountpreflight(mount_t mp)
+{
+	mount_lock_spin(mp);
+	mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
+	mount_unlock(mp);
+}
+
void
vn_setunionwait(vnode_t vp)
{
mp->mnt_flag |= MNT_RDONLY;
else if (mp->mnt_flag & MNT_RDONLY)
mp->mnt_kern_flag |= MNTK_WANTRDWR;
+
mp->mnt_flag &= ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC |
MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | MNT_AUTOMOUNTED |
- MNT_DEFWRITE | MNT_NOATIME | MNT_QUARANTINE);
+ MNT_DEFWRITE | MNT_NOATIME | MNT_QUARANTINE | MNT_CPROTECT );
+
mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC |
MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | MNT_AUTOMOUNTED |
- MNT_DEFWRITE | MNT_NOATIME | MNT_QUARANTINE);
+ MNT_DEFWRITE | MNT_NOATIME | MNT_QUARANTINE | MNT_CPROTECT );
#if CONFIG_MACF
if (uap->flags & MNT_MULTILABEL) {
if ( (error = file_vnode(uap->fd, &vp)) )
return (error);
+ error = vnode_getwithref(vp);
+ if (error) {
+ file_drop(uap->fd);
+ return (error);
+ }
+
AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
mp = vp->v_mount;
if (!mp) {
- file_drop(uap->fd);
- return (EBADF);
+ error = EBADF;
+ goto out;
}
sp = &mp->mnt_vfsstat;
if ((error = vfs_update_vfsstat(mp,vfs_context_current(),VFS_USER_EVENT)) != 0) {
- file_drop(uap->fd);
- return (error);
+ goto out;
}
- file_drop(uap->fd);
error = munge_statfs(mp, sp, uap->buf, NULL, IS_64BIT_PROCESS(p), TRUE);
+out:
+ file_drop(uap->fd);
+ vnode_put(vp);
+
return (error);
}
if ( (error = file_vnode(uap->fd, &vp)) )
return (error);
+ error = vnode_getwithref(vp);
+ if (error) {
+ file_drop(uap->fd);
+ return (error);
+ }
+
AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
mp = vp->v_mount;
if (!mp) {
- file_drop(uap->fd);
- return (EBADF);
+ error = EBADF;
+ goto out;
}
sp = &mp->mnt_vfsstat;
if ((error = vfs_update_vfsstat(mp, vfs_context_current(), VFS_USER_EVENT)) != 0) {
- file_drop(uap->fd);
- return (error);
+ goto out;
}
- file_drop(uap->fd);
error = statfs64_common(mp, sp, uap->buf);
+out:
+ file_drop(uap->fd);
+ vnode_put(vp);
+
return (error);
}
return(error);
}
+/*
+ * pid_suspend: suspend all threads of the task owned by the given pid.
+ *
+ * Reports a kern_return_t-style code both through *ret and as the return
+ * value: KERN_SUCCESS on success; KERN_FAILURE for pid 0, a failed MACF or
+ * posix check; KERN_PROTECTION_FAILURE / KERN_ABORTED for task access port
+ * failures. On non-embedded builds, non-root callers targeting another
+ * process must be vetted by the task access server first.
+ */
+kern_return_t
+pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
+{
+	task_t	target = NULL;
+	proc_t	targetproc = PROC_NULL;
+	int	pid = args->pid;
+	int	error = 0;
+
+#if CONFIG_MACF
+	error = mac_proc_check_suspend_resume(p, 0); /* 0 for suspend */
+	if (error) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+#endif
+
+	/* pid 0 (the kernel's proc) may never be suspended from here. */
+	if (pid == 0) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+
+	/*
+	 * NOTE(review): proc_find() can return PROC_NULL; this relies on
+	 * task_for_pid_posix_check() rejecting PROC_NULL, since
+	 * targetproc->task is dereferenced below — confirm.
+	 */
+	targetproc = proc_find(pid);
+	if (!task_for_pid_posix_check(targetproc)) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+
+	target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
+	if (target != TASK_NULL) {
+		mach_port_t tfpport;
+
+		/* If we aren't root and target's task access port is set... */
+		if (!kauth_cred_issuser(kauth_cred_get()) &&
+			targetproc != current_proc() &&
+			(task_get_task_access_port(target, &tfpport) == 0) &&
+			(tfpport != IPC_PORT_NULL)) {
+
+			if (tfpport == IPC_PORT_DEAD) {
+				error = KERN_PROTECTION_FAILURE;
+				goto out;
+			}
+
+			/* Call up to the task access server */
+			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+
+			if (error != MACH_MSG_SUCCESS) {
+				if (error == MACH_RCV_INTERRUPTED)
+					error = KERN_ABORTED;
+				else
+					error = KERN_FAILURE;
+				goto out;
+			}
+		}
+	}
+#endif
+
+	/* Hold our own task reference across the suspend call. */
+	task_reference(target);
+	error = task_suspend(target);
+	task_deallocate(target);
+
+out:
+	if (targetproc != PROC_NULL)
+		proc_rele(targetproc);
+	*ret = error;
+	return error;
+}
+
+/*
+ * pid_resume: resume a task previously suspended with pid_suspend().
+ *
+ * Mirror image of pid_suspend(): same permission checks, same
+ * kern_return_t-style reporting through *ret and the return value.
+ * Fix: removed an unreachable "return 0;" that followed "return error;"
+ * at the end of the function (dead code).
+ */
+kern_return_t
+pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
+{
+	task_t	target = NULL;
+	proc_t	targetproc = PROC_NULL;
+	int	pid = args->pid;
+	int	error = 0;
+
+#if CONFIG_MACF
+	error = mac_proc_check_suspend_resume(p, 1); /* 1 for resume */
+	if (error) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+#endif
+
+	/* pid 0 (the kernel's proc) is never a valid target. */
+	if (pid == 0) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+
+	/*
+	 * NOTE(review): proc_find() can return PROC_NULL; this relies on
+	 * task_for_pid_posix_check() rejecting PROC_NULL, since
+	 * targetproc->task is dereferenced below — confirm.
+	 */
+	targetproc = proc_find(pid);
+	if (!task_for_pid_posix_check(targetproc)) {
+		error = KERN_FAILURE;
+		goto out;
+	}
+
+	target = targetproc->task;
+#ifndef CONFIG_EMBEDDED
+	if (target != TASK_NULL) {
+		mach_port_t tfpport;
+
+		/* If we aren't root and target's task access port is set... */
+		if (!kauth_cred_issuser(kauth_cred_get()) &&
+			targetproc != current_proc() &&
+			(task_get_task_access_port(target, &tfpport) == 0) &&
+			(tfpport != IPC_PORT_NULL)) {
+
+			if (tfpport == IPC_PORT_DEAD) {
+				error = KERN_PROTECTION_FAILURE;
+				goto out;
+			}
+
+			/* Call up to the task access server */
+			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);
+
+			if (error != MACH_MSG_SUCCESS) {
+				if (error == MACH_RCV_INTERRUPTED)
+					error = KERN_ABORTED;
+				else
+					error = KERN_FAILURE;
+				goto out;
+			}
+		}
+	}
+#endif
+
+	/* Hold our own task reference across the resume call. */
+	task_reference(target);
+	error = task_resume(target);
+	task_deallocate(target);
+
+out:
+	if (targetproc != PROC_NULL)
+		proc_rele(targetproc);
+	*ret = error;
+	return error;
+}
+
static int
sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
__unused int arg2, struct sysctl_req *req)
-10.3.0
+10.4.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
_boot
_bsd_hostname
_bsd_set_dependency_capable
+_buf_getcpaddr
+_buf_setcpaddr
_buf_setfilter
_cdevsw
_clalloc
_clfree
_cons_cinput
+_cp_key_store_action
+_cp_register_wraps
_fd_rdwr
_get_aiotask
_hz
+_ifnet_clone_attach
+_ifnet_clone_detach
+_ifnet_idle_flags
+_ifnet_set_idle_flags
_in6addr_local
_inaddr_local
_inet_domain_mutex
_m_split
_m_trailingspace:_mbuf_trailingspace
_mac_proc_set_enforce
+_mbuf_get_priority
_mcl_to_paddr
_mountroot_post_hook
_net_add_domain
_vfs_context_set_special_port
_vfs_get_notify_attributes
_vfs_mntlabel
+_vfs_setunmountpreflight
_vfs_vnodecovered
_vm_map_copy_copy
_vm_map_copy_discard
_aes_decrypt_cbc
_aes_decrypt_key
_aes_decrypt_key128
+_aes_decrypt_key256
_aes_encrypt_cbc
_aes_encrypt_key128
+_aes_encrypt_key256
_appleClut8
_boot
_cons_cinput
uint32_t runtimePages;
uint32_t runtimePageCount;
uint64_t runtimeVirtualPages __attribute__ ((packed));
- uint8_t reserved2[8];
+
+ uint32_t performanceDataStart;
+ uint32_t performanceDataSize;
uint64_t encryptStart __attribute__ ((packed));
uint64_t machineSignature __attribute__ ((packed));
kIOUCScalarIScalarO = 0,
kIOUCScalarIStructO = 2,
kIOUCStructIStructO = 3,
- kIOUCScalarIStructI = 4
+ kIOUCScalarIStructI = 4,
+
+ kIOUCForegroundOnly = 0x00000010,
};
/*! @enum
// keys for clientHasPrivilege
#define kIOClientPrivilegeAdministrator "root"
#define kIOClientPrivilegeLocalUser "local"
+#define kIOClientPrivilegeForeground "foreground"
/*! @enum
@abstract Constants to specify the maximum number of scalar arguments in the IOExternalMethodArguments structure. These constants are documentary since the scalarInputCount, scalarOutputCount fields reflect the actual number passed.
queue_head_t * iocpu_get_platform_active_queue(void)
{
+ if (!iocpu_active_queue.next)
+ {
+ queue_init(&iocpu_quiesce_queue);
+ queue_init(&iocpu_active_queue);
+ iocpu_platform_cpu_action_init(&iocpu_quiesce_queue, &iocpu_active_queue);
+ }
return (&iocpu_active_queue);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-OSDefineMetaClassAndAbstractStructors(IOPolledInterface, OSObject);
-
-OSMetaClassDefineReservedUnused(IOPolledInterface, 0);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 1);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 2);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 3);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 4);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 5);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 6);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 7);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 8);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 9);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 10);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 11);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 12);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 13);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 14);
-OSMetaClassDefineReservedUnused(IOPolledInterface, 15);
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
extern uint32_t gIOHibernateState;
uint32_t gIOHibernateMode;
static char gIOHibernateBootSignature[256+1];
setProperty(kRootDomainSleepReasonKey, sleepReason);
}
- tracePoint(kIOPMTracePointSleepStarted);
-
patriarch->sleepSystem();
return kIOReturnSuccess;
}
{
if ( getPowerState() == DOZE_STATE )
{
+ tracePoint(kIOPMTracePointSystemWakeDriversPhase);
changePowerStateToPriv(ON_STATE);
patriarch->wakeSystem();
}
gMessageClientType);
tellClients(kIOMessageSystemWillPowerOn, clientMessageFilter);
}
+
+ if (SLEEP_STATE == newPowerState)
+ {
+ tracePoint(kIOPMTracePointSleepStarted);
+ }
}
if (*rdFlags & kServiceFlagTopLevelPCI)
*/
setProperty(kRootDomainSleepReasonKey, kIOPMIdleSleepKey);
- tracePoint(kIOPMTracePointSleepStarted);
-
sleepASAP = false;
changePowerStateToPriv(SLEEP_STATE);
}
--- /dev/null
+/*
+ * Copyright (c) 2006-2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <IOKit/IOService.h>
+#include <IOKit/IOPolledInterface.h>
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+OSDefineMetaClassAndAbstractStructors(IOPolledInterface, OSObject);
+
+OSMetaClassDefineReservedUnused(IOPolledInterface, 0);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 1);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 2);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 3);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 4);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 5);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 6);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 7);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 8);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 9);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 10);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 11);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 12);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 13);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 14);
+OSMetaClassDefineReservedUnused(IOPolledInterface, 15);
+
+#if !HIBERNATION
+/*
+ * KPI stub when hibernation is configured out: there is never any
+ * polled-interface work pending, so always report not ready.
+ */
+IOReturn
+IOPolledInterface::checkAllForWork(void)
+{
+    return (kIOReturnNotReady);
+}
+#endif /* !HIBERNATION */
return user;
}
+/*
+ * Determine whether a task currently runs in the background (throttled)
+ * category. On KERN_SUCCESS, *isBg is true iff the task's category policy
+ * role is TASK_THROTTLE_APPLICATION.
+ *
+ * Fixes: the function was declared to return bool while actually returning
+ * a kern_return_t — every caller assigns the result to a kern_return_t and
+ * compares it to KERN_SUCCESS (and one propagates it to user space), so
+ * collapsing the code to 0/1 corrupted the error. It also ignored its task
+ * argument in favor of current_task(); honor the parameter instead (all
+ * current callers pass current_task(), so behavior is unchanged for them).
+ */
+static kern_return_t IOUCIsBackgroundTask(task_t task, bool * isBg)
+{
+    kern_return_t               kr;
+    task_category_policy_data_t info;
+    mach_msg_type_number_t      count = TASK_CATEGORY_POLICY_COUNT;
+    boolean_t                   get_default = false;
+
+    kr = task_policy_get(task,
+                         TASK_CATEGORY_POLICY,
+                         (task_policy_t) &info,
+                         &count,
+                         &get_default);
+
+    *isBg = ((KERN_SUCCESS == kr) && (info.role == TASK_THROTTLE_APPLICATION));
+    return (kr);
+}
+
IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
const char * privilegeName )
{
OSDictionary * user;
bool secureConsole;
+
+ if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
+ sizeof(kIOClientPrivilegeForeground)))
+ {
+ bool isBg;
+ kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
+
+ if (KERN_SUCCESS != kr)
+ return (kr);
+ return (isBg ? kIOReturnNotPrivileged : kIOReturnSuccess);
+ }
+
if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
sizeof(kIOClientPrivilegeSecureConsoleProcess))))
task = (task_t)((IOUCProcessToken *)securityToken)->token;
if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
return (kIOReturnUnsupported);
+ if (kIOUCForegroundOnly & method->flags)
+ {
+ bool isBg;
+ kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
+
+ if ((KERN_SUCCESS == kr) && isBg)
+ return (kIOReturnNotPermitted);
+ }
+
switch (method->flags & kIOUCTypeMask)
{
case kIOUCScalarIStructI:
if( !(method = getTargetAndMethodForIndex(&object, selector)) )
return (kIOReturnUnsupported);
+ if (kIOUCForegroundOnly & method->flags)
+ {
+ bool isBg;
+ kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
+
+ if ((KERN_SUCCESS == kr) && isBg)
+ return (kIOReturnNotPermitted);
+ }
+
switch (method->flags & kIOUCTypeMask)
{
case kIOUCScalarIStructI:
#include <IOKit/IOBSD.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOService.h>
+#include <IOKit/IOCatalogue.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOKitKeys.h>
#include <IOKit/IOPlatformExpert.h>
return( kIOReturnSuccess );
}
+/*
+ * Publish or retract a named property on the IOKit resources service.
+ * A true 'value' publishes the property as kOSBooleanTrue; a false
+ * 'value' removes the property from the resource service.
+ */
+void
+IOServicePublishResource( const char * property, boolean_t value )
+{
+    if (!value)
+    {
+        IOService::getResourceService()->removeProperty( property );
+        return;
+    }
+    IOService::publishResource( property, kOSBooleanTrue );
+}
+
+/*
+ * Block until a service matching the named resource property is
+ * published, or until 'timeout' expires.
+ *
+ * Returns true iff a matching service was found within the timeout.
+ * Both the matching dictionary and any returned service reference are
+ * released before returning.
+ */
+boolean_t
+IOServiceWaitForMatchingResource( const char * property, uint64_t timeout )
+{
+    boolean_t      found = false;
+    IOService    * match = 0;
+    OSDictionary * dict  = IOService::resourceMatching( property );
+
+    if (dict)
+    {
+        match = IOService::waitForMatchingService( dict, timeout );
+        found = (match != 0);
+        dict->release();
+    }
+    if (match)
+        match->release();
+
+    return( found );
+}
+
+/*
+ * Ask the IOCatalogue whether any driver personality carries the given
+ * property (matched against the value true).
+ *
+ * Returns true iff at least one matching driver personality is present
+ * in the catalogue.  The temporary matching dictionary and result set
+ * are released before returning.
+ */
+boolean_t
+IOCatalogueMatchingDriversPresent( const char * property )
+{
+    boolean_t      found = false;
+    SInt32         generationCount = 0;
+    OSOrderedSet * set  = 0;
+    OSDictionary * dict = OSDictionary::withCapacity(1);
+
+    if (dict)
+    {
+        dict->setObject( property, kOSBooleanTrue );
+        set = gIOCatalogue->findDrivers( dict, &generationCount );
+        found = (set && (set->getCount() > 0));
+        dict->release();
+    }
+    if (set)
+        set->release();
+
+    return( found );
+}
+
OSDictionary * IOBSDNameMatching( const char * name )
{
OSDictionary * dict;
kern_return_t
IOKitBSDInit( void );
+void
+IOServicePublishResource( const char * property, boolean_t value );
+
+boolean_t
+IOServiceWaitForMatchingResource( const char * property, uint64_t timeout );
+
+boolean_t
+IOCatalogueMatchingDriversPresent( const char * property );
+
#ifdef __cplusplus
}
#endif
$(OBJS_WERROR): WERROR=-Werror
+# Files that must go in the __HIB segment:
+UNCONFIGURED_HIB_FILES= \
+ IOHibernateRestoreKernel.o \
+ WKdmDecompress.o
+
+HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS))
+
######################################################################
#END Machine dependent Makefile fragment for i386
######################################################################
$(OBJS_WERROR): WERROR=-Werror
+# Files that must go in the __HIB segment:
+UNCONFIGURED_HIB_FILES= \
+ IOHibernateRestoreKernel.o \
+ WKdmDecompress.o
+
+HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS))
+
######################################################################
#END Machine dependent Makefile fragment for ppc
######################################################################
LDOBJS = $(OBJS)
$(COMPONENT).o: $(LDOBJS)
- $(_v)$(SEG_HACK) __HIB IOHibernateRestoreKernel.o -o _IOHibernateRestoreKernel.o
- $(_v)mv _IOHibernateRestoreKernel.o IOHibernateRestoreKernel.o
- $(_v)$(SEG_HACK) __HIB WKdmDecompress.o -o _WKdmDecompress.o
- $(_v)mv _WKdmDecompress.o WKdmDecompress.o
+ $(_v)for hib_file in ${HIB_FILES}; \
+ do \
+ $(SEG_HACK) __HIB $${hib_file} -o $${hib_file}__; \
+ mv $${hib_file}__ $${hib_file} ; \
+ done;
@echo LDFILELIST $(COMPONENT)
$(_v)( for obj in ${LDOBJS}; do \
echo $(TARGET)$(COMP_OBJ_DIR)/$(KERNEL_CONFIG)/$${obj}; \
$(OBJS_WERROR): WERROR=-Werror
+# Files that must go in the __HIB segment:
+UNCONFIGURED_HIB_FILES= \
+ IOHibernateRestoreKernel.o \
+ WKdmDecompress.o
+
+HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS))
+
######################################################################
#END Machine dependent Makefile fragment for x86_64
######################################################################
# libIOKit
-
iokit/Kernel/WKdmCompress.c optional hibernation
iokit/Kernel/WKdmDecompress.c optional hibernation
iokit/Kernel/IOHibernateIO.cpp optional hibernation
iokit/Kernel/IOCatalogue.cpp optional iokitcpp
iokit/Kernel/IOPMPowerSource.cpp optional iokitcpp
iokit/Kernel/IOPMPowerSourceList.cpp optional iokitcpp
+iokit/Kernel/IOPolledInterface.cpp optional iokitcpp
iokit/Kernel/IOWorkLoop.cpp optional iokitcpp
iokit/Kernel/IOEventSource.cpp optional iokitcpp
if ($kgm_kotype == 31)
printf "UPL"
end
+ if ($kgm_kotype == 34)
+ printf "FD"
+ end
printf ")\n"
end
case N_OLEVEL:
case N_PSYM:
case N_EINCL:
- case N_LBRAC:
case N_EXCL:
- case N_RBRAC:
case N_BCOMM:
case N_LENG:
case N_OPT:
case N_ENTRY:
case N_ECOMM:
case N_ECOML:
+ /* These are labeled as NO_SECT in stab.h, but they are actually
+ * section-based on OS X. We must mark them as such so they get
+ * relocated.
+ */
+ case N_LBRAC:
+ case N_RBRAC:
sym->predicates.is_section = 1;
break;
default:
}
if (n_desc & N_WEAK_REF) {
- sym->predicates.is_weak = 1;
+ sym->predicates.is_weak = 1;
}
if (n_desc & N_ARM_THUMB_DEF) {
- sym->predicates.is_thumb = 1;
+ sym->predicates.is_thumb = 1;
}
/* The first set of type fields are mutually exclusive, so they can be
#define windowm1_loc [sp,#28]
#define lmask_loc [sp,#32]
#define dmask_loc [sp,#36]
+ #define op_loc [sp,#44]
#define dist_loc [sp,#48]
#define local_size 52
cmphi r2, bits // internel (bits < op)
ldrhib r3, [in, #1]! // if (op > bits) (PUP(in))
addhi hold, hold, r3, asl bits // hold += (unsigned long)(PUP(in)) << bits;
+ addhi bits, bits, #8 // bits += 8
rsb ip, r2, #32 // (32-op)
ror r3, hold, r2 // hold<<(32-op)
add r3, r1, r3, lsr ip // dist += (unsigned)hold & ((1U << op) - 1);
-
- ldr ip, beg_loc // beg
-
-#ifdef INFLATE_STRICT
- ldr r1, state_dmax // r1 = dmax
-#endif
-
str r3, dist_loc // save dist
+
#ifdef INFLATE_STRICT
+ ldr r1, state_dmax // r1 = dmax
cmp r3, r1 // dist vs dmax
bgt invalid_distance_too_far_back // if dist > dmax, set up msg/mode = bad and break
#endif
+ mov hold, hold, lsr r2 // hold >>= op ;
+ rsb bits, r2, bits // bits -= op;
+
+ ldr ip, beg_loc // beg
ldr r1, dist_loc // dist
rsb r3, ip, out // (out - beg);
- addhi bits, bits, #8 // this is the internel bits += 8 from above
cmp r1, r3 // dist vs (out - beg)
- mov hold, hold, lsr r2 // hold >>= op ;
- rsb bits, r2, bits // bits -= op;
rsbls r2, r1, out // if (dist<=op) r2 = from = out-dist
bls copy_direct_from_output // if (dist<=op) branch to copy_direct_from_output
ldr r2, whave_loc // whave
rsb r1, r3, r1 // op = dist-op
cmp r2, r1 // whave vs op
- nop // pad dummy for better performance
+ str r1, op_loc // save a copy of op
bcc invalid_distance_too_far_back // if whave < op, message invalid distance too far back, and break
cmp write, #0 // write
bne non_very_common_case // if (write ==0) non_very_common_case
// the following : if (write == 0) { /* very common case */ }
- nop // pad dummy for better performance
+ ldr r1, op_loc // restore op in r1
ldr ip, wsize_loc // wsize
cmp r6, r1 // len vs op
rsb r3, r1, ip // wsize - op
ldr ip, windowm1_loc // window - 1
add r2, ip, r3 // from = window - 1 + wsize - op : setup for using PUP(from)
- movhi r3, r1 // if len > op, r3 = op
- movhi r1, out // if len > op, r1 = out
+ //movhi r3, r1 // if len > op, r3 = op
+ //movhi r1, out // if len > op, r1 = out
bhi some_from_window // if (len > op), branch to some_from_window
finish_copy:
some_from_window:
- add out, r3, out // out += op
- rsb r6, r3, r6 // len -= op
+ ldr r3, dist_loc // dist
+ rsb r6, r1, r6 // len -= op
some_from_window_loop: // do {
ldrb ip, [r2, #1]! // PUP(from);
- subs r3, r3, #1 // --op
- strb ip, [r1, #1]! // PUP(out) = PUP(from);
+ subs r1, #1 // --op
+ strb ip, [out, #1]! // PUP(out) = PUP(from);
bne some_from_window_loop // } while(op);
- ldr r3, dist_loc // dist
rsb r2, r3, out // from = out - dist;
b finish_copy
non_very_common_case:
+ ldr r1, op_loc // restore op in r1
cmp write, r1 // write vs op
- nop // pad dummy for better performance
bcs contiguous_in_window // if (write >= op) branch to contiguous_in_window
/* wrap around window */
bne waw_loop // } while (op);
cmp write, r6 // write vs len
- ldrcs r2, windowm1_loc // if (write>=len) r2 = from = window-1;
+ ldr r2, windowm1_loc // if (write>=len) r2 = from = window-1;
bcs finish_copy // if (write>=len) branch to finish_copy
// some from start of window
mov r1, write // op = write
sub r6, write // len -= op
- sub ip, out
- add ip, #1 // out+ip -> from
sow_loop: // do {
- ldrb r3,[out, ip] // PUP(from)
+ ldrb r3,[r2, #1]! // PUP(from)
subs r1, #1 // --op;
strb r3, [out,#1]! // PUP(out) = PUP(from);
bne sow_loop // } while (op);
ldr r2, dist_loc // dist
- sub r6, r6, write // len -= write
rsb r2, r2, out // r2 = from = out-dist
b finish_copy // continue to finish_copy
export LDFLAGS_KERNEL_DEVELOPMENTX86_64 = $(LDFLAGS_KERNEL_RELEASEX86_64)
export LDFLAGS_KERNEL_PROFILEX86_64 = $(LDFLAGS_KERNEL_RELEASEX86_64)
-export LDFLAGS_KERNEL_ARM = \
+export LDFLAGS_KERNEL_RELEASEARM = \
-Wl,-new_linker \
-Wl,-pagezero_size,0x0 \
- -Wl,-segaddr,__HIB,0xC0000000 \
- -Wl,-image_base,0xC0008000 \
+ -Wl,-image_base,0xC0001000 \
-Wl,-exported_symbols_list,$(TARGET)/kernel-kpi.exp
+export LDFLAGS_KERNEL_DEVELOPMENTARM = \
+ -Wl,-new_linker \
+ -Wl,-pagezero_size,0x0 \
+ -Wl,-image_base,0xC0001000
+
+export LDFLAGS_KERNEL_DEBUGARM = $(LDFLAGS_KERNEL_DEVELOPMENTARM)
+
export LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \
$($(addsuffix $(MACHINE_CONFIG),MACHINE_FLAGS_)) \
osfmk/kern/ipc_host.c standard
osfmk/kern/ipc_kobject.c standard
osfmk/kern/ipc_mig.c standard
+osfmk/kern/ipc_misc.c optional config_embedded
osfmk/kern/ipc_sync.c standard
osfmk/kern/ipc_tt.c standard
osfmk/kern/kalloc.c standard
ps->ps_special_clusters = 0;
ps->ps_pgcount = ps->ps_pgnum;
ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift;
+ dp_pages_reserve += ps->ps_pgcount;
PS_UNLOCK(ps);
- dp_pages_reserve += interim_pages_removed;
} else {
paging_segments[i] = PAGING_SEGMENT_NULL;
paging_segment_count--;
/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
sched_poll_yield_shift = boot_arg;
}
- if (PE_parse_boot_argn("idlehalt", &boot_arg, sizeof (boot_arg))) {
- idlehalt = boot_arg;
- }
/* The I/O port to issue a read from, in the event of a panic. Useful for
* triggering logic analyzers.
*/
if (tscFreq <= SLOW_TSC_THRESHOLD) /* is TSC too slow for _commpage_nanotime? */
bits |= kSlow;
+ if (cpuid_features() & CPUID_FEATURE_AES)
+ bits |= kHasAES;
+
_cpu_capabilities = bits; // set kernel version for use by drivers etc
}
#define k64Bit 0x00000200 /* processor supports EM64T (not what mode you're running in) */
#define kHasSSE4_1 0x00000400
#define kHasSSE4_2 0x00000800
+#define kHasAES 0x00001000
#define kInOrderPipeline 0x00002000 /* in-order execution */
#define kSlow 0x00004000 /* tsc < nanosecond */
#define kUP 0x00008000 /* set if (kNumCPUs == 1) */
/*
- * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
{
i386_cpu_info_t *cpuinfo;
+ topoParms.stable = FALSE;
+
cpuinfo = cpuid_info();
/*
/*
- * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
ml_set_interrupts_enabled(istate);
DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1);
+ /*
+ * Let the CPU Power Management know that the topology is stable.
+ */
+ topoParms.stable = TRUE;
+ pmCPUStateInit();
+
/*
* Iterate over all logical cpus finding or creating the affinity set
* for their LLC cache. Each affinity set possesses a processor set
/*
- * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
uint32_t nLCoresPerPackage;
uint32_t nPCoresPerPackage;
uint32_t nPackages;
+ boolean_t stable;
} x86_topology_parameters_t;
/* Called after cpu discovery */
case CPUID_MODEL_NEHALEM_EX:
cpufamily = CPUFAMILY_INTEL_NEHALEM;
break;
+ case CPUID_MODEL_DALES_32NM:
+ case CPUID_MODEL_WESTMERE:
+ case CPUID_MODEL_WESTMERE_EX:
+ cpufamily = CPUFAMILY_INTEL_WESTMERE;
+ break;
}
break;
}
* (which determines whether SMT/Hyperthreading is active).
*/
switch (info_p->cpuid_cpufamily) {
+ /*
+ * This should be the same as Nehalem but an A0 silicon bug returns
+ * invalid data in the top 12 bits. Hence, we use only bits [19..16]
+ * rather than [31..16] for core count - which actually can't exceed 8.
+ */
+ case CPUFAMILY_INTEL_WESTMERE: {
+ uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
+ info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
+ info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
+ break;
+ }
case CPUFAMILY_INTEL_NEHALEM: {
uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
{CPUID_FEATURE_HTT, "HTT",},
{CPUID_FEATURE_TM, "TM",},
{CPUID_FEATURE_SSE3, "SSE3"},
+ {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
{CPUID_FEATURE_MONITOR, "MON"},
{CPUID_FEATURE_DSCPL, "DSCPL"},
{CPUID_FEATURE_VMX, "VMX"},
{CPUID_FEATURE_SSE4_2, "SSE4.2"},
{CPUID_FEATURE_xAPIC, "xAPIC"},
{CPUID_FEATURE_POPCNT, "POPCNT"},
+ {CPUID_FEATURE_AES, "AES"},
{CPUID_FEATURE_VMM, "VMM"},
{0, 0}
},
extfeature_map[] = {
{CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
{CPUID_EXTFEATURE_XD, "XD"},
+ {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
{CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
{CPUID_EXTFEATURE_EM64T, "EM64T"},
{CPUID_EXTFEATURE_LAHF, "LAHF"},
#define CPUID_FEATURE_PBE _Bit(31) /* Pend Break Enable */
#define CPUID_FEATURE_SSE3 _HBit(0) /* Streaming SIMD extensions 3 */
+#define CPUID_FEATURE_PCLMULQDQ _HBit(1) /* PCLMULQDQ Instruction */
#define CPUID_FEATURE_MONITOR _HBit(3) /* Monitor/mwait */
#define CPUID_FEATURE_DSCPL _HBit(4) /* Debug Store CPL */
#define CPUID_FEATURE_SSE4_2 _HBit(20) /* Streaming SIMD extensions 4.2 */
#define CPUID_FEATURE_xAPIC _HBit(21) /* Extended APIC Mode */
#define CPUID_FEATURE_POPCNT _HBit(23) /* POPCNT instruction */
+#define CPUID_FEATURE_AES _HBit(25) /* AES instructions */
#define CPUID_FEATURE_VMM _HBit(31) /* VMM (Hypervisor) present */
/*
#define CPUID_EXTFEATURE_SYSCALL _Bit(11) /* SYSCALL/sysret */
#define CPUID_EXTFEATURE_XD _Bit(20) /* eXecute Disable */
+#define CPUID_EXTFEATURE_1GBPAGE _Bit(26) /* 1G-Byte Page support */
#define CPUID_EXTFEATURE_RDTSCP _Bit(27) /* RDTSCP */
#define CPUID_EXTFEATURE_EM64T _Bit(29) /* Extended Mem 64 Technology */
#define CPUID_MODEL_MEROM 15
#define CPUID_MODEL_PENRYN 23
#define CPUID_MODEL_NEHALEM 26
+#define CPUID_MODEL_ATOM 28
#define CPUID_MODEL_FIELDS 30 /* Lynnfield, Clarksfield, Jasper */
#define CPUID_MODEL_DALES 31 /* Havendale, Auburndale */
#define CPUID_MODEL_NEHALEM_EX 46
+#define CPUID_MODEL_DALES_32NM 37 /* Clarkdale, Arrandale */
+#define CPUID_MODEL_WESTMERE 44 /* Gulftown, Westmere-EP, Westmere-WS */
+#define CPUID_MODEL_WESTMERE_EX 47
#ifndef ASSEMBLER
#include <stdint.h>
case kEfiBootServicesCode:
case kEfiBootServicesData:
case kEfiConventionalMemory:
- case kEfiACPIReclaimMemory:
case kEfiACPIMemoryNVS:
case kEfiPalCode:
// runtime services will be restarted, so no save
case kEfiRuntimeServicesCode:
case kEfiRuntimeServicesData:
+ // contents are volatile once the platform expert starts
+ case kEfiACPIReclaimMemory:
// non dram
case kEfiReservedMemoryType:
case kEfiUnusableMemory:
header->runtimePages = args->efiRuntimeServicesPageStart;
header->runtimePageCount = args->efiRuntimeServicesPageCount;
- if (args->Version == kBootArgsVersion1 && args->Revision >= kBootArgsRevision1_5) {
- header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
+ header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
+ if (args->Version == kBootArgsVersion1 && args->Revision >= kBootArgsRevision1_6) {
+ header->performanceDataStart = args->performanceDataStart;
+ header->performanceDataSize = args->performanceDataSize;
} else {
- header->runtimeVirtualPages = 0;
+ header->performanceDataStart = 0;
+ header->performanceDataSize = 0;
}
return (KERN_SUCCESS);
/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
if (this_cpu_datap->lcpu.core == NULL)
goto failed;
- pmCPUStateInit();
-
#if NCOPY_WINDOWS > 0
this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
if (this_cpu_datap->cpu_pmap == NULL)
/*
- * Copyright (c) 2004-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/sched_prim.h>
#include <i386/lapic.h>
-/*
- * Kernel parameter determining whether threads are halted unconditionally
- * in the idle state. This is the default behavior.
- * See machine_idle() for use.
- */
-int idlehalt = 1;
-
extern int disableConsoleOutput;
decl_simple_lock_data(,pm_init_lock);
if (my_cpu == NULL)
goto out;
- /*
- * If idlehalt isn't set, then don't do any power management related
- * idle handling.
- */
- if (!idlehalt)
- goto out;
-
my_cpu->lcpu.state = LCPU_IDLE;
DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
MARK_CPU_IDLE(cpu_number());
/*
- * Copyright (c) 2006-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2006-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This value should be changed each time that pmDsipatch_t or pmCallBacks_t
* changes.
*/
-#define PM_DISPATCH_VERSION 19
+#define PM_DISPATCH_VERSION 20
/*
* Dispatch table for functions that get installed when the power
*/
typedef struct
{
- int (*pmCPUStateInit)(void);
+ kern_return_t (*pmCPUStateInit)(void);
void (*cstateInit)(void);
uint64_t (*MachineIdle)(uint64_t maxIdleDuration);
uint64_t (*GetDeadline)(x86_lcpu_t *lcpu);
busFreq = EFI_FSB_frequency();
switch (cpuid_cpufamily()) {
+ case CPUFAMILY_INTEL_WESTMERE:
case CPUFAMILY_INTEL_NEHALEM: {
uint64_t cpu_mhz;
uint64_t msr_flex_ratio;
"(UPL) ",
"(MEM_OBJ_CONTROL) ",
"(AU_SESSIONPORT) ", /* 33 */
+ "(FILEPORT)", /* 34 */
#if CONFIG_MACF_MACH
"(LABELH) ",
#endif
debug.h \
etimer.h \
ipc_mig.h \
+ ipc_misc.h \
kalloc.h \
kext_alloc.h \
kern_types.h \
#include <kern/ipc_tt.h>
#include <kern/ipc_mig.h>
+#include <kern/ipc_misc.h>
#include <kern/ipc_kobject.h>
#include <kern/host_notify.h>
#include <kern/mk_timer.h>
return TRUE;
}
#endif
+#if CONFIG_EMBEDDED
+ if (ip_kotype(port) == IKOT_FILEPORT) {
+ fileport_notify(request_header);
+ return TRUE;
+ }
+#endif
break;
#define IKOT_UPL 31
#define IKOT_MEM_OBJ_CONTROL 32
#define IKOT_AU_SESSIONPORT 33
-#define IKOT_LABELH 34
+#define IKOT_FILEPORT 34
+#define IKOT_LABELH 35
/*
* Add new entries here and adjust IKOT_UNKNOWN.
* Please keep ipc/ipc_object.c:ikot_print_array up to date.
*/
-#define IKOT_UNKNOWN 35 /* magic catchall */
+#define IKOT_UNKNOWN 36 /* magic catchall */
#define IKOT_MAX_TYPE (IKOT_UNKNOWN+1) /* # of IKOT_ types */
--- /dev/null
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+#include <mach/mach_types.h>
+#include <mach/notify.h>
+#include <ipc/ipc_port.h>
+#include <kern/ipc_kobject.h>
+#include <kern/ipc_misc.h>
+
+extern void fileport_releasefg(struct fileglob *);
+
+/*
+ * fileport_alloc
+ *
+ * Description: Obtain a send right for the given fileglob, which must be
+ * referenced.
+ *
+ * Parameters: fg A fileglob.
+ *
+ * Returns: Port of type IKOT_FILEPORT with fileglob set as its kobject.
+ * Port is returned with a send right.
+ */
+ipc_port_t
+fileport_alloc(struct fileglob *fg)
+{
+	ipc_port_t fileport;
+	ipc_port_t sendport;
+	ipc_port_t notifyport;
+
+	fileport = ipc_port_alloc_kernel();
+	if (fileport == IP_NULL) {
+		/* Allocation failed; fall through and return IP_NULL. */
+		goto out;
+	}
+
+	/* Tag the port as a fileport with the fileglob as its kobject. */
+	ipc_kobject_set(fileport, (ipc_kobject_t)fg, IKOT_FILEPORT);
+	/* Arm a no-senders notification (sync value 1) so fileport_notify()
+	 * runs once all send rights are gone and can drop the fg reference. */
+	notifyport = ipc_port_make_sonce(fileport);
+	ip_lock(fileport); /* unlocked by ipc_port_nsrequest */
+	ipc_port_nsrequest(fileport, 1, notifyport, &notifyport);
+
+	sendport = ipc_port_make_send(fileport);
+	if (!IP_VALID(sendport)) {
+		panic("Couldn't allocate send right for fileport!\n");
+	}
+
+out:
+	return fileport;
+}
+
+
+/*
+ * fileport_get_fileglob
+ *
+ * Description: Obtain the fileglob associated with a given port.
+ *
+ * Parameters: port A Mach port of type IKOT_FILEPORT.
+ *
+ * Returns: NULL The given Mach port did not reference a
+ * fileglob.
+ * !NULL The fileglob that is associated with the
+ * Mach port.
+ *
+ * Notes: The caller must have a reference on the fileport.
+ */
+struct fileglob *
+fileport_port_to_fileglob(ipc_port_t port)
+{
+	struct fileglob *fg = NULL;
+
+	if (!IP_VALID(port))
+		return NULL;
+
+	/* Only an active port of fileport type carries a fileglob kobject;
+	 * read it under the port lock. */
+	ip_lock(port);
+	if (ip_active(port) && IKOT_FILEPORT == ip_kotype(port))
+		fg = (void *)port->ip_kobject;
+	ip_unlock(port);
+
+	return fg;
+}
+
+
+/*
+ * fileport_notify
+ *
+ * Description: Handle a no-senders notification for a fileport. Unless
+ * the message is spoofed, destroys the port and releases
+ * its reference on the fileglob.
+ *
+ * Parameters: msg A Mach no-senders notification message.
+ */
+void
+fileport_notify(mach_msg_header_t *msg)
+{
+	mach_no_senders_notification_t *notification = (void *)msg;
+	ipc_port_t port = notification->not_header.msgh_remote_port;
+	struct fileglob *fg = NULL;
+
+	if (!IP_VALID(port))
+		panic("Invalid port passed to fileport_notify()\n");
+
+	ip_lock(port);
+
+	fg = (struct fileglob *)port->ip_kobject;
+
+	/* Sanity-check that this really is a live fileport carrying a
+	 * fileglob; a spoofed or stale notification is fatal. */
+	if (!ip_active(port))
+		panic("Inactive port passed to fileport_notify()\n");
+	if (ip_kotype(port) != IKOT_FILEPORT)
+		panic("Port of type other than IKOT_FILEPORT passed to fileport_notify()\n");
+	if (fg == NULL)
+		panic("fileport without an associated fileglob\n"); /* typo fixed: was "assocated" */
+
+	/* A new send right may have been created after the notification was
+	 * queued; only tear the port down when none remain. */
+	if (port->ip_srights == 0) {
+		ip_unlock(port);
+
+		fileport_releasefg(fg);
+		ipc_port_dealloc_kernel(port);
+	} else {
+		ip_unlock(port);
+	}
+
+	return;
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+#ifdef KERNEL_PRIVATE
+#ifndef _KERN_IPC_MISC_H_
+#define _KERN_IPC_MISC_H_
+
+/* Fileport interface: wraps a BSD fileglob in a Mach send right. */
+struct fileglob;
+/* Allocate an IKOT_FILEPORT kernel port (with a send right) for a
+ * referenced fileglob. */
+ipc_port_t fileport_alloc(struct fileglob *);
+/* Return the fileglob backing a fileport, or NULL if the port is not
+ * a valid, active fileport. */
+struct fileglob *fileport_port_to_fileglob(ipc_port_t);
+/* No-senders notification handler; releases the fileglob and destroys
+ * the port once all send rights are gone. */
+void fileport_notify(mach_msg_header_t *);
+
+#endif /* _KERN_IPC_MISC_H_ */
+#endif /* KERNEL_PRIVATE */
*
* Calculate the timesharing priority based upon usage and load.
*/
+#ifdef CONFIG_EMBEDDED
+
+#define do_priority_computation(thread, pri) \
+ MACRO_BEGIN \
+ (pri) = (thread)->priority /* start with base priority */ \
+ - ((thread)->sched_usage >> (thread)->pri_shift); \
+ if ((pri) < MAXPRI_THROTTLE) { \
+ if ((thread)->task->max_priority > MAXPRI_THROTTLE) \
+ (pri) = MAXPRI_THROTTLE; \
+ else \
+ if ((pri) < MINPRI_USER) \
+ (pri) = MINPRI_USER; \
+ } else \
+ if ((pri) > MAXPRI_KERNEL) \
+ (pri) = MAXPRI_KERNEL; \
+ MACRO_END
+
+#else
+
#define do_priority_computation(thread, pri) \
MACRO_BEGIN \
(pri) = (thread)->priority /* start with base priority */ \
(pri) = MAXPRI_KERNEL; \
MACRO_END
+#endif
+
/*
* set_priority:
*
#define MAXPRI (NRQS-1)
#define MINPRI IDLEPRI /* lowest legal priority schedulable */
#define IDLEPRI 0 /* idle thread priority */
-#define DEPRESSPRI MINPRI /* depress priority */
/*
* High-level priority assignments
#define MAXPRI_THROTTLE (MINPRI + 4) /* 4 */
#define MINPRI_USER MINPRI /* 0 */
+#ifdef CONFIG_EMBEDDED
+#define DEPRESSPRI MAXPRI_THROTTLE
+#else
+#define DEPRESSPRI MINPRI /* depress priority */
+#endif
+
/*
* Macro to check for invalid priorities.
*/
* bound to a different processor, nor be in the wrong
* processor set.
*/
- if ( thread->state == TH_RUN &&
+ if (
+#if CONFIG_EMBEDDED
+ ((thread->state & ~TH_SUSP) == TH_RUN) &&
+#else
+ thread->state == TH_RUN &&
+#endif
(thread->sched_pri >= BASEPRI_RTQUEUES ||
processor->processor_meta == PROCESSOR_META_NULL ||
processor->processor_meta->primary == processor) &&
); \
MACRO_END
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
+ flags, wired, options, result) \
+ MACRO_BEGIN \
+ result=KERN_SUCCESS; \
+ PMAP_ENTER(pmap, virtual_address, page, protection, \
+ flags, wired); \
+ MACRO_END
+
#define IS_USERADDR64_CANONICAL(addr) \
((addr) < (VM_MAX_USER_PAGE_ADDRESS + PAGE_SIZE))
#define CPUFAMILY_INTEL_MEROM 0x426f69ef
#define CPUFAMILY_INTEL_PENRYN 0x78ea4fbc
#define CPUFAMILY_INTEL_NEHALEM 0x6b5a4cd2
+#define CPUFAMILY_INTEL_WESTMERE 0x573b5eec
#define CPUFAMILY_ARM_9 0xe73283ae
#define CPUFAMILY_ARM_11 0x8ff620d8
#define CPUFAMILY_ARM_XSCALE 0x53b005f5
#define VM32_MIN_ADDRESS ((vm32_offset_t) 0)
#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF))
+
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
+ flags, wired, options, result) \
+ MACRO_BEGIN \
+ result=KERN_SUCCESS; \
+ PMAP_ENTER(pmap, virtual_address, page, protection, \
+ flags, wired); \
+ MACRO_END
+
+
#endif /* MACH_KERNEL_PRIVATE */
#endif /* KERNEL_PRIVATE */
unsigned int flags,
boolean_t wired);
+extern kern_return_t pmap_enter_options(
+ pmap_t pmap,
+ vm_map_offset_t v,
+ ppnum_t pn,
+ vm_prot_t prot,
+ unsigned int flags,
+ boolean_t wired,
+ unsigned int options);
+
extern void pmap_remove_some_phys(
pmap_t pmap,
ppnum_t pn);
pmap_t __pmap = (pmap); \
vm_page_t __page = (page); \
\
- if (__pmap != kernel_pmap) { \
- ASSERT_PAGE_DECRYPTED(__page); \
+ PMAP_ENTER_CHECK(__pmap, __page) \
+ pmap_enter(__pmap, \
+ (virtual_address), \
+ __page->phys_page, \
+ (protection), \
+ (flags), \
+ (wired)); \
+ MACRO_END
+#endif /* !PMAP_ENTER */
+
+#ifndef PMAP_ENTER_OPTIONS
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
+ flags, wired, options, result) \
+ MACRO_BEGIN \
+ pmap_t __pmap = (pmap); \
+ vm_page_t __page = (page); \
+ \
+ PMAP_ENTER_CHECK(__pmap, __page) \
+ result = pmap_enter_options(__pmap, \
+ (virtual_address), \
+ __page->phys_page, \
+ (protection), \
+ (flags), \
+ (wired), \
+ options); \
+ MACRO_END
+#endif /* !PMAP_ENTER_OPTIONS */
+
+#define PMAP_ENTER_CHECK(pmap, page) \
+{ \
+ if ((pmap) != kernel_pmap) { \
+ ASSERT_PAGE_DECRYPTED(page); \
} \
- if (__page->error) { \
+ if ((page)->error) { \
panic("VM page %p should not have an error\n", \
- __page); \
+ (page)); \
} \
- pmap_enter(__pmap, \
- (virtual_address), \
- __page->phys_page, \
- (protection), \
- (flags), \
- (wired)); \
- MACRO_END
-#endif /* !PMAP_ENTER */
+}
/*
* Routines to manage reference/modify bits based on
#define VM_WIMG_USE_DEFAULT 0x80000000
#define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */
+
+#define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return
+ * KERN_RESOURCE_SHORTAGE
+ * instead */
+
#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
vm_map_offset_t va);
extern void vm_fault_classify_init(void);
#endif
+unsigned long vm_pmap_enter_blocked = 0;
unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
-
#if CONFIG_ENFORCE_SIGNED_CODE
int cs_enforcement_disable=0;
#else
int *type_of_fault)
{
unsigned int cache_attr;
- kern_return_t kr;
+ kern_return_t kr, pe_result;
boolean_t previously_pmapped = m->pmapped;
boolean_t must_disconnect = 0;
boolean_t map_is_switched, map_is_switch_protected;
prot &= ~VM_PROT_EXECUTE;
}
}
- PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired);
+
+ /* Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958> */
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, cache_attr,
+ wired, PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if(pe_result == KERN_RESOURCE_SHORTAGE) {
+ /* The nonblocking version of pmap_enter did not succeed.
+ * Use the blocking version instead. Requires marking
+ * the page busy and unlocking the object */
+ boolean_t was_busy = m->busy;
+ m->busy = TRUE;
+ vm_object_unlock(m->object);
+
+ PMAP_ENTER(pmap, vaddr, m, prot, cache_attr, wired);
+
+ /* Take the object lock again. */
+ vm_object_lock(m->object);
+
+ /* If the page was busy, someone else will wake it up.
+ * Otherwise, we have to do it now. */
+ assert(m->busy);
+ if(!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ vm_pmap_enter_blocked++;
+ }
}
/*
#define VM_OBJECT_HASH_COUNT 1024
#define VM_OBJECT_HASH_LOCK_COUNT 512
-static lck_mtx_t vm_object_hashed_lock_data[VM_OBJECT_HASH_COUNT];
-static lck_mtx_ext_t vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_COUNT];
+static lck_mtx_t vm_object_hashed_lock_data[VM_OBJECT_HASH_LOCK_COUNT];
+static lck_mtx_ext_t vm_object_hashed_lock_data_ext[VM_OBJECT_HASH_LOCK_COUNT];
static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone *vm_object_hash_zone;
*
* the laundry and pageout_queue flags are cleared...
*/
+#if CONFIG_EMBEDDED
+ if (p->laundry)
+ vm_pageout_throttle_up(p);
+#else
vm_pageout_throttle_up(p);
+#endif
/*
* toss the wire count we picked up
m->dirty = FALSE;
clear_refmod |= VM_MEM_MODIFIED;
-#if CONFIG_EMBEDDED
- dwp->dw_mask |= DW_move_page;
-#endif
+ if (m->throttled) {
+ /*
+ * This page is now clean and
+ * reclaimable. Move it out
+ * of the throttled queue, so
+ * that vm_pageout_scan() can
+ * find it.
+ */
+ dwp->dw_mask |= DW_move_page;
+ }
#if MACH_PAGEMAP
vm_external_state_clr(object->existence_map, offset);
#endif /* MACH_PAGEMAP */
object->reusable_page_count++;
assert(object->resident_page_count >= object->reusable_page_count);
reusable++;
+#if CONFIG_EMBEDDED
+ } else {
+ if (m->reusable) {
+ m->reusable = FALSE;
+ object->reusable_page_count--;
+ }
+#endif
}
}
pmap_clear_refmod(m->phys_page, clear_refmod);
reusable_page = FALSE;
}
+#if CONFIG_EMBEDDED
+ if ((reusable_page || all_reusable) && object->all_reusable) {
+		/* MADV_FREE_REUSABLE has already been applied to this
+		 * object (all_reusable is set), so a second call is
+		 * redundant and probably illegal; return without doing
+		 * any work. */
+ return;
+ }
+#endif
+
while (size) {
length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable);
m->list_req_pending = TRUE;
m->cleaning = TRUE;
m->pageout = TRUE;
- m->laundry = TRUE;
if (object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
+
+ /*
+ * pgo_laundry count is tied to the laundry bit
+ */
+ m->laundry = TRUE;
q->pgo_laundry++;
m->pageout_queue = TRUE;
{
struct vm_pageout_queue *q;
- assert(m->laundry);
assert(m->object != VM_OBJECT_NULL);
assert(m->object != kernel_object);
vm_pageout_throttle_up_count++;
-
+
if (m->object->internal == TRUE)
- q = &vm_pageout_queue_internal;
+ q = &vm_pageout_queue_internal;
else
- q = &vm_pageout_queue_external;
+ q = &vm_pageout_queue_external;
if (m->pageout_queue == TRUE) {
- m->pageout_queue = FALSE;
queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
+ m->pageout_queue = FALSE;
+
m->pageq.next = NULL;
m->pageq.prev = NULL;
vm_object_paging_end(m->object);
}
- m->laundry = FALSE;
- q->pgo_laundry--;
- if (q->pgo_throttled == TRUE) {
- q->pgo_throttled = FALSE;
- thread_wakeup((event_t) &q->pgo_laundry);
+ if ( m->laundry == TRUE ) {
+
+ m->laundry = FALSE;
+ q->pgo_laundry--;
+ if (q->pgo_throttled == TRUE) {
+ q->pgo_throttled = FALSE;
+ thread_wakeup((event_t) &q->pgo_laundry);
+ }
}
}
if (inactive_throttled == TRUE) {
throttle_inactive:
if (!IP_VALID(memory_manager_default) &&
- object->internal &&
- (object->purgable == VM_PURGABLE_DENY ||
- object->purgable == VM_PURGABLE_NONVOLATILE ||
- object->purgable == VM_PURGABLE_VOLATILE )) {
+ object->internal && m->dirty &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
queue_enter(&vm_page_queue_throttled, m,
vm_page_t, pageq);
m->throttled = TRUE;
vm_page_lockspin_queues();
- if (dst_page->pageout_queue == TRUE) {
+#if CONFIG_EMBEDDED
+ if (dst_page->laundry)
+#else
+ if (dst_page->pageout_queue == TRUE)
+#endif
+ {
/*
* we've buddied up a page for a clustered pageout
* that has already been moved to the pageout
*
* the laundry and pageout_queue flags are cleared...
*/
+#if CONFIG_EMBEDDED
+ if (page->laundry)
+ vm_pageout_throttle_up(page);
+#else
vm_pageout_throttle_up(page);
+#endif
/*
* toss the wire count we picked up
assert(m->pageq.next == NULL && m->pageq.prev == NULL);
if (!IP_VALID(memory_manager_default) &&
- m->dirty && m->object->internal &&
- (m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE ||
- m->object->purgable == VM_PURGABLE_VOLATILE )) {
+ m->dirty && m->object->internal &&
+ (m->object->purgable == VM_PURGABLE_DENY ||
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE)) {
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
assert(!m->laundry);
assert(m->pageq.next == NULL && m->pageq.prev == NULL);
if (!IP_VALID(memory_manager_default) &&
- !m->fictitious && m->dirty && m->object->internal &&
- (m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE ||
- m->object->purgable == VM_PURGABLE_VOLATILE )) {
+ !m->fictitious && m->dirty && m->object->internal &&
+ (m->object->purgable == VM_PURGABLE_DENY ||
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE)) {
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
*/
static unsigned int
vm_page_verify_free_list(
+ queue_head_t *vm_page_queue,
unsigned int color,
vm_page_t look_for_page,
boolean_t expect_page)
found_page = FALSE;
npages = 0;
- prev_m = (vm_page_t) &vm_page_queue_free[color];
- queue_iterate(&vm_page_queue_free[color],
+ prev_m = (vm_page_t) vm_page_queue;
+ queue_iterate(vm_page_queue,
m,
vm_page_t,
pageq) {
if ( ! m->busy )
panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n",
color, npages, m);
- if ( (m->phys_page & vm_color_mask) != color)
+ if ( color != (unsigned int) -1 && (m->phys_page & vm_color_mask) != color)
panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n",
color, npages, m, m->phys_page & vm_color_mask, color);
++npages;
other_color++) {
if (other_color == color)
continue;
- vm_page_verify_free_list(other_color, look_for_page, FALSE);
+ vm_page_verify_free_list(&vm_page_queue_free[other_color],
+ other_color, look_for_page, FALSE);
}
+ if (color != (unsigned int) -1) {
+ vm_page_verify_free_list(&vm_lopage_queue_free,
+ (unsigned int) -1, look_for_page, FALSE);
+ }
+
panic("vm_page_verify_free_list(color=%u)\n", color);
}
if (!expect_page && found_page) {
static void
vm_page_verify_free_lists( void )
{
- unsigned int color, npages;
+ unsigned int color, npages, nlopages;
if (! vm_page_verify_free_lists_enabled)
return;
lck_mtx_lock(&vm_page_queue_free_lock);
for( color = 0; color < vm_colors; color++ ) {
- npages += vm_page_verify_free_list(color, VM_PAGE_NULL, FALSE);
+ npages += vm_page_verify_free_list(&vm_page_queue_free[color],
+ color, VM_PAGE_NULL, FALSE);
}
- if (npages != vm_page_free_count)
- panic("vm_page_verify_free_lists: npages %u free_count %d",
- npages, vm_page_free_count);
+ nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
+ (unsigned int) -1,
+ VM_PAGE_NULL, FALSE);
+ if (npages != vm_page_free_count || nlopages != vm_lopage_free_count)
+ panic("vm_page_verify_free_lists: "
+ "npages %u free_count %d nlopages %u lo_free_count %u",
+ npages, vm_page_free_count, nlopages, vm_lopage_free_count);
lck_mtx_unlock(&vm_page_queue_free_lock);
}
*/
RESET_STATE_OF_RUN();
- } else if (!npages & ((m->phys_page & pnum_mask) != 0)) {
+ } else if (!npages && ((m->phys_page & pnum_mask) != 0)) {
/*
* not aligned
*/
#endif
if (m1->free) {
- unsigned int color;
+ if ( m1->phys_page <= vm_lopage_poolend &&
+ m1->phys_page >= vm_lopage_poolstart) {
- color = m1->phys_page & vm_color_mask;
+ assert( flags & KMA_LOMEM );
#if MACH_ASSERT
- vm_page_verify_free_list(color, m1, TRUE);
+ vm_page_verify_free_list(&vm_lopage_queue_free,
+ (unsigned int) -1, m1, TRUE);
#endif
- queue_remove(&vm_page_queue_free[color],
- m1,
- vm_page_t,
- pageq);
- m1->pageq.next = NULL;
- m1->pageq.prev = NULL;
+ queue_remove(&vm_lopage_queue_free,
+ m1,
+ vm_page_t,
+ pageq);
+ vm_lopage_free_count--;
+
+#if MACH_ASSERT
+ vm_page_verify_free_list(&vm_lopage_queue_free,
+ (unsigned int) -1, VM_PAGE_NULL, FALSE);
+#endif
+ } else {
+
+ unsigned int color;
+
+ color = m1->phys_page & vm_color_mask;
#if MACH_ASSERT
- vm_page_verify_free_list(color, VM_PAGE_NULL, FALSE);
+ vm_page_verify_free_list(&vm_page_queue_free[color],
+ color, m1, TRUE);
#endif
+ queue_remove(&vm_page_queue_free[color],
+ m1,
+ vm_page_t,
+ pageq);
+ vm_page_free_count--;
+#if MACH_ASSERT
+ vm_page_verify_free_list(&vm_page_queue_free[color],
+ color, VM_PAGE_NULL, FALSE);
+#endif
+ }
+
+ m1->pageq.next = NULL;
+ m1->pageq.prev = NULL;
/*
* Clear the "free" bit so that this page
* does not get considered for another
*/
m1->free = FALSE;
assert(m1->busy);
-
- vm_page_free_count--;
}
}
/*
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#if HIBERNATION
+
static vm_page_t hibernate_gobble_queue;
static void
count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
}
+#endif /* HIBERNATION */
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <mach_vm_debug.h>
/* Boot argument structure - passed into Mach kernel at boot time.
* "Revision" can be incremented for compatible changes
*/
-#define kBootArgsRevision 5
+#define kBootArgsRevision 6
#define kBootArgsVersion 1
/* Snapshot constants of previous revisions that are supported */
#define kBootArgsVersion1 1
#define kBootArgsRevision1_4 4
#define kBootArgsRevision1_5 5
+#define kBootArgsRevision1_6 6
#define kBootArgsEfiMode32 32
#define kBootArgsEfiMode64 64
uint8_t efiMode; /* 32 = 32-bit, 64 = 64-bit */
uint8_t __reserved1[3];
- uint32_t __reserved2[3];
+ uint32_t __reserved2[1];
+ uint32_t performanceDataStart; /* physical address of log */
+ uint32_t performanceDataSize;
uint64_t efiRuntimeServicesVirtualPageStart; /* virtual address of defragmented runtime pages */
uint32_t __reserved3[2];
/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
char v_pixelFormat[64];
unsigned long v_offset; /* offset into video memory to start at */
unsigned long v_length; /* length of video memory (0 for v_rowBytes * v_height) */
- long v_resv[ 2 ];
+ unsigned char v_rotate; /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */
+ unsigned char v_scale; /* Scale Factor for both X & Y */
+ char reserved1[2];
+ long reserved2;
};
typedef struct PE_Video PE_Video;
void mac_posixshm_label_init(struct pshminfo *pshm);
int mac_proc_check_debug(proc_t proc1, proc_t proc2);
int mac_proc_check_fork(proc_t proc);
+int mac_proc_check_suspend_resume(proc_t proc, int sr);
int mac_proc_check_get_task_name(kauth_cred_t cred, struct proc *p);
int mac_proc_check_get_task(kauth_cred_t cred, struct proc *p);
int mac_proc_check_getaudit(proc_t proc);
kauth_cred_t cred,
struct proc *proc
);
+/**
+ @brief Access control over pid_suspend and pid_resume
+ @param cred Subject credential
+ @param proc Subject process trying to run pid_suspend or pid_resume
+ @param sr Call is suspend (0) or resume (1)
+
+ Determine whether the subject identified is allowed to suspend or resume
+ other processes.
+
+ @return Return 0 if access is granted, otherwise an appropriate value for
+ errno should be returned.
+*/
+typedef int mpo_proc_check_suspend_resume_t(
+ kauth_cred_t cred,
+ struct proc *proc,
+ int sr
+);
/**
@brief Access control check for retrieving audit information
@param cred Subject credential
/*!
\struct mac_policy_ops
*/
+#define MAC_POLICY_OPS_VERSION 2 /* inc when new reserved slots are taken */
struct mac_policy_ops {
mpo_audit_check_postselect_t *mpo_audit_check_postselect;
mpo_audit_check_preselect_t *mpo_audit_check_preselect;
mpo_vnode_check_uipc_bind_t *mpo_vnode_check_uipc_bind;
mpo_vnode_check_uipc_connect_t *mpo_vnode_check_uipc_connect;
mac_proc_check_run_cs_invalid_t *mpo_proc_check_run_cs_invalid;
- mpo_reserved_hook_t *mpo_reserved4;
+ mpo_proc_check_suspend_resume_t *mpo_proc_check_suspend_resume;
mpo_reserved_hook_t *mpo_reserved5;
mpo_reserved_hook_t *mpo_reserved6;
mpo_reserved_hook_t *mpo_reserved7;
}
#endif /* LCTX */
-
void
mac_thread_userret(int code, int error, struct thread *thread)
{
if (mac_late)
MAC_PERFORM(thread_userret, code, error, thread);
}
+
+int
+mac_proc_check_suspend_resume(proc_t curp, int sr)
+{
+ kauth_cred_t cred;
+ int error;
+
+ if (!mac_proc_enforce ||
+ !mac_proc_check_enforce(curp, MAC_PROC_ENFORCE))
+ return (0);
+
+ cred = kauth_cred_proc_ref(curp);
+ MAC_CHECK(proc_check_suspend_resume, cred, curp, sr);
+ kauth_cred_unref(&cred);
+
+ return (error);
+}
{
return 0;
}
+int mac_proc_check_suspend_resume(void)
+{
+ return 0;
+}
int mac_set_enforce_proc(void)
{
return 0;