dev_t dev;
short retval;
-#if HFS_DIAGNOSTIC
- DBG_ASSERT(vcb != NULL);
- DBG_ASSERT(catInfo != NULL);
- DBG_ASSERT(vpp != NULL);
- DBG_ASSERT((forkType == kDirectory) || (forkType == kDataFork) || (forkType == kRsrcFork));
- if (catInfo->nodeData.cnd_type == kCatalogFolderNode) {
- DBG_ASSERT(forkType == kDirectory);
- } else {
- DBG_ASSERT(forkType != kDirectory);
- }
-#endif
-
- if ( ! ((forkType == kDirectory) || (forkType == kDataFork) || (forkType == kRsrcFork)))
- panic("Bad fork type");
- if (catInfo->nodeData.cnd_type == kCatalogFolderNode) {
- if (forkType != kDirectory)
- panic("non directory type");
- } else {
- if (forkType != kDataFork && forkType != kRsrcFork)
- panic("non fork type");
- }
-
hfsmp = VCBTOHFS(vcb);
mp = HFSTOVFS(hfsmp);
dev = hfsmp->hfs_raw_dev;
}
}
- /* Must malloc() here, since getnewvnode() can sleep */
MALLOC_ZONE(hp, struct hfsnode *, sizeof(struct hfsnode), M_HFSNODE, M_WAITOK);
bzero((caddr_t)hp, sizeof(struct hfsnode));
-
- /*
- * Set that this node is in the process of being allocated
- * Set it as soon as possible, so context switches well always hit upon it.
- * if this is set then wakeup() MUST be called on hp after the flag is cleared
- * DO NOT exit without clearing and waking up !!!!
- */
- hp->h_nodeflags |= IN_ALLOCATING; /* Mark this as being allocating */
+ hp->h_nodeflags |= IN_ALLOCATING;
lockinit(&hp->h_lock, PINOD, "hfsnode", 0, 0);
-
-
- /* getnewvnode() does a VREF() on the vnode */
- /* Allocate a new vnode. If unsuccesful, leave after freeing memory */
- if ((retval = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, &vp))) {
- wakeup(hp); /* Shouldnt happen, but just to make sure */
- FREE_ZONE(hp, sizeof(struct hfsnode), M_HFSNODE);
- *vpp = NULL;
- return (retval);
- };
+ H_FORKTYPE(hp) = forkType;
+ rl_init(&hp->h_invalidranges);
/*
- * Set the essentials before locking it down
+ * There were several blocking points since we first
+ * checked the hash. Now that we're through blocking,
+ * check the hash again in case we're racing for the
+ * same hnode.
*/
- hp->h_vp = vp; /* Make HFSTOV work */
- vp->v_data = hp; /* Make VTOH work */
- H_FORKTYPE(hp) = forkType;
- rl_init(&hp->h_invalidranges);
- fm = NULL;
-
+ vp = hfs_vhashget(dev, catInfo->nodeData.cnd_nodeID, forkType);
+ if (vp != NULL) {
+ /* We lost the race, use the winner's vnode */
+ FREE_ZONE(hp, sizeof(struct hfsnode), M_HFSNODE);
+ *vpp = vp;
+ UBCINFOCHECK("hfs_vcreate", vp);
+ return (0);
+ }
+
/*
- * Lock the hfsnode and insert the hfsnode into the hash queue, also if meta exists
+ * Insert the hfsnode into the hash queue, also if meta exists
* add to sibling list and return the meta address
*/
+ fm = NULL;
if (SIBLING_FORKTYPE(forkType))
hfs_vhashins_sibling(dev, catInfo->nodeData.cnd_nodeID, hp, &fm);
else
hfs_vhashins(dev, catInfo->nodeData.cnd_nodeID, hp);
+ /* Allocate a new vnode. If unsuccessful, leave after freeing memory */
+ if ((retval = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, &vp))) {
+ hfs_vhashrem(hp);
+ if (hp->h_nodeflags & IN_WANT) {
+ hp->h_nodeflags &= ~IN_WANT;
+ wakeup(hp);
+ }
+ FREE_ZONE(hp, sizeof(struct hfsnode), M_HFSNODE);
+ *vpp = NULL;
+ return (retval);
+ }
+ hp->h_vp = vp;
+ vp->v_data = hp;
+
+ hp->h_nodeflags &= ~IN_ALLOCATING;
+ if (hp->h_nodeflags & IN_WANT) {
+ hp->h_nodeflags &= ~IN_WANT;
+ wakeup((caddr_t)hp);
+ }
+
/*
* If needed allocate and init the object meta data:
*/
};
fm->h_usecount++;
-
/*
* Init the File Control Block.
*/
ubc_info_init(vp);
}
- /*
+ /*
* Initialize the vnode from the inode, check for aliases, sets the VROOT flag.
* Note that the underlying vnode may have changed.
*/
if ((retval = hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp))) {
- wakeup((caddr_t)hp);
vput(vp);
*vpp = NULL;
return (retval);
}
- /*
- * Finish inode initialization now that aliasing has been resolved.
- */
- hp->h_meta->h_devvp = hfsmp->hfs_devvp;
- VREF(hp->h_meta->h_devvp);
+ /*
+ * Finish inode initialization now that aliasing has been resolved.
+ */
+ hp->h_meta->h_devvp = hfsmp->hfs_devvp;
+ VREF(hp->h_meta->h_devvp);
-#if HFS_DIAGNOSTIC
- hp->h_valid = HFS_VNODE_MAGIC;
-#endif
- hp->h_nodeflags &= ~IN_ALLOCATING; /* vnode is completely initialized */
-
- /* Wake up anybody waiting for us to finish..see hfs_vhash.c */
- wakeup((caddr_t)hp);
-
-#if HFS_DIAGNOSTIC
-
- /* Lets do some testing here */
- DBG_ASSERT(hp->h_meta);
- DBG_ASSERT(VTOH(vp)==hp);
- DBG_ASSERT(HTOV(hp)==vp);
- DBG_ASSERT(hp->h_meta->h_usecount>=1 && hp->h_meta->h_usecount<=2);
- if (catInfo->nodeData.cnd_type == kCatalogFolderNode) {
- DBG_ASSERT(vp->v_type == VDIR);
- DBG_ASSERT(H_FORKTYPE(VTOH(vp)) == kDirectory);
- }
-#endif // HFS_DIAGNOSTIC
-
-
*vpp = vp;
return 0;
-
}
void CopyCatalogToObjectMeta(struct hfsCatalogInfo *catalogInfo, struct vnode *vp, struct hfsfilemeta *fm)
UInt32 nodeID;
UInt8 forkType;
{
- struct proc *p = current_proc(); /* XXX */
+ struct proc *p = current_proc();
struct hfsnode *hp;
struct vnode *vp;
- DBG_ASSERT(forkType!=kUndefinedFork);
/*
* Go through the hash list
* If a vnode is in the process of being cleaned out or being
loop:
simple_lock(&hfs_vhash_slock);
for (hp = HFSNODEHASH(dev, nodeID)->lh_first; hp; hp = hp->h_hash.le_next) {
- /* The vnode might be in an incomplete state, so sleep until its ready */
if (hp->h_nodeflags & IN_ALLOCATING) {
+ /*
+ * vnode is being created. Wait for it to finish...
+ */
+ hp->h_nodeflags |= IN_WANT;
simple_unlock(&hfs_vhash_slock);
- tsleep((caddr_t)hp, PINOD, "hfs_vhashlookup", 0);
+ tsleep((caddr_t)hp, PINOD, "hfs_vhashget", 0);
goto loop;
- };
-
- DBG_ASSERT(hp->h_meta != NULL);
- if ((H_FILEID(hp) == nodeID) &&
- (H_DEV(hp) == dev) &&
- !(hp->h_meta->h_metaflags & IN_NOEXISTS)) {
- /* SER XXX kDefault of meta data (ksysfile) is not assumed here */
- if ( (forkType == kAnyFork) ||
- (H_FORKTYPE(hp) == forkType) ||
- ((forkType == kDefault) && ((H_FORKTYPE(hp) == kDirectory)
- || (H_FORKTYPE(hp) == kDataFork)))) {
- vp = HTOV(hp);
- simple_lock(&vp->v_interlock);
- simple_unlock(&hfs_vhash_slock);
- if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
- goto loop;
- return (vp);
- };
- };
- };
+ }
+ if ((H_FILEID(hp) != nodeID) || (H_DEV(hp) != dev) ||
+ (hp->h_meta->h_metaflags & IN_NOEXISTS))
+ continue;
+
+ /* SER XXX kDefault of meta data (ksysfile) is not assumed here */
+ if ( (forkType == kAnyFork) ||
+ (H_FORKTYPE(hp) == forkType) ||
+ ((forkType == kDefault) && ((H_FORKTYPE(hp) == kDirectory)
+ || (H_FORKTYPE(hp) == kDataFork)))) {
+ vp = HTOV(hp);
+ simple_lock(&vp->v_interlock);
+ simple_unlock(&hfs_vhash_slock);
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
+ goto loop;
+ return (vp);
+ }
+ }
simple_unlock(&hfs_vhash_slock);
return (NULL);
}
struct hfsnode *thp;
struct hfsfilemeta *tfm;
- DBG_ASSERT(fm != NULL);
- DBG_ASSERT(hp != NULL);
- DBG_ASSERT(hp->h_meta == NULL);
- DBG_ASSERT(H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork);
-
tfm = NULL;
lockmgr(&hp->h_lock, LK_EXCLUSIVE, (struct slock *)0, current_proc());
-
/*
* Go through the hash list to see if a sibling exists
* If it does, store it to return
loop:
simple_lock(&hfs_vhash_slock);
- for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) {
- if (thp->h_nodeflags & IN_ALLOCATING) { /* Its in the process of being allocated */
+ for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) {
+ if (thp->h_nodeflags & IN_ALLOCATING) {
+ /*
+ * vnode is being created. Wait for it to finish...
+ */
+ thp->h_nodeflags |= IN_WANT;
simple_unlock(&hfs_vhash_slock);
- tsleep((caddr_t)thp, PINOD, "hfs_vhash_ins_meta", 0);
+ tsleep((caddr_t)thp, PINOD, "hfs_vhashins_sibling", 0);
goto loop;
- };
-
- DBG_ASSERT(thp->h_meta != NULL);
+ }
if ((H_FILEID(thp) == nodeID) && (H_DEV(thp) == dev)) {
tfm = hp->h_meta = thp->h_meta;
break;
- };
- };
+ }
+ }
/* Add to sibling list..if it can have them */
if (tfm && (H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork)) {
- DBG_ASSERT(tfm->h_siblinghead.cqh_first != NULL && tfm->h_siblinghead.cqh_last != NULL);
simple_lock(&tfm->h_siblinglock);
CIRCLEQ_INSERT_HEAD(&tfm->h_siblinghead, hp, h_sibling);
simple_unlock(&tfm->h_siblinglock);
simple_lock(&hfs_vhash_slock);
/* Test to see if there are siblings, should only apply to forks */
- if (hp->h_meta->h_siblinghead.cqh_first != NULL) {
+ if (hp->h_meta != NULL && hp->h_meta->h_siblinghead.cqh_first != NULL) {
simple_lock(&hp->h_meta->h_siblinglock);
CIRCLEQ_REMOVE(&hp->h_meta->h_siblinghead, hp, h_sibling);
simple_unlock(&hp->h_meta->h_siblinglock);
struct hfsnode *thp, *nextNode;
UInt32 newNodeID;
- DBG_ASSERT(hp != NULL);
- DBG_ASSERT(hp->h_meta != NULL);
-
- newNodeID = H_FILEID(hp);
-
- oldHeadIndex = HFSNODEHASH(H_DEV(hp), oldNodeID);
- newHeadIndex = HFSNODEHASH(H_DEV(hp), newNodeID);
+ newNodeID = H_FILEID(hp);
+ oldHeadIndex = HFSNODEHASH(H_DEV(hp), oldNodeID);
+ newHeadIndex = HFSNODEHASH(H_DEV(hp), newNodeID);
/* If it is moving to the same bucket...then we are done */
- if (oldHeadIndex == newHeadIndex)
+ if (oldHeadIndex == newHeadIndex)
return;
loop:
-
/*
* Go through the old hash list
 * If there is a nodeid mismatch, or the nodeid doesn't match the current bucket
* allocated, wait for it to be finished and then try again
*/
simple_lock(&hfs_vhash_slock);
- for (nextNode = oldHeadIndex->lh_first; nextNode; ) {
- if (nextNode->h_nodeflags & IN_ALLOCATING) { /* Its in the process of being allocated */
+ for (nextNode = oldHeadIndex->lh_first; nextNode; ) {
+ if (nextNode->h_nodeflags & IN_ALLOCATING) {
+ /*
+ * vnode is being created. Wait for it to finish...
+ */
+ nextNode->h_nodeflags |= IN_WANT;
simple_unlock(&hfs_vhash_slock);
- tsleep((caddr_t)nextNode, PINOD, "hfs_vhashmove", 0);
+ tsleep((caddr_t)nextNode, PINOD, "hfs_vhashmove", 0);
goto loop;
- };
+ }
- DBG_ASSERT(nextNode->h_meta != NULL);
thp = nextNode;
- nextNode = nextNode->h_hash.le_next;
+ nextNode = nextNode->h_hash.le_next;
if (newNodeID == H_FILEID(thp)) {
LIST_REMOVE(thp, h_hash);
- thp->h_hash.le_next = NULL;
- thp->h_hash.le_next = NULL;
- LIST_INSERT_HEAD(newHeadIndex, thp, h_hash);
- };
- };
+ thp->h_hash.le_next = NULL;
+ thp->h_hash.le_next = NULL;
+ LIST_INSERT_HEAD(newHeadIndex, thp, h_hash);
+ }
+ }
simple_unlock(&hfs_vhash_slock);
}
thread_t self = current_thread();
thread_act_t th_act_self = current_act();
-
/*
* Remove proc from allproc queue and from pidhash chain.
* Need to do this before we do anything that can block.
ut = get_bsdthread_info(th_act_self);
ut->uu_sig = 0;
untimeout(realitexpire, (caddr_t)p);
-
}
void
thread_act_t th_act_self = current_act();
struct task *task = p->task;
register int i,s;
- struct uthread *ut;
boolean_t funnel_state;
/* This can happen if thread_terminate of the single thread
}
}
-
/*
* Save exit status and final rusage info, adding in child rusage
* info and self times.
timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime);
}
-
ruadd(p->p_ru, &p->p_stats->p_cru);
/*
struct wait4_args *uap;
int *retval;
{
-
return (wait1(p, uap, retval, 0));
}
int
wait1continue(result)
{
- void *vt;
- thread_act_t thread;
- struct uthread *ut;
- int *retval;
- struct proc *p;
-
- p = current_proc();
- p->p_flag &= ~P_WAITING;
+ void *vt;
+ thread_act_t thread;
+ int *retval;
+ struct proc *p;
- if (result != 0) {
- return(result);
- }
+ if (result)
+ return(result);
- thread = current_act();
- ut = get_bsdthread_info(thread);
+ p = current_proc();
+ thread = current_act();
vt = get_bsduthreadarg(thread);
retval = get_bsduthreadrval(thread);
wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0);
register struct proc *p, *t;
int status, error;
-
-#if 0
- /* since we are funneled we don't need to do this atomically, yet */
- if (q->p_flag & P_WAITING) {
- return(EINVAL);
- }
- q->p_flag |= P_WAITING; /* only allow single thread to wait() */
-#endif
-
+retry:
if (uap->pid == 0)
uap->pid = -q->p_pgid;
p->p_pgid != -(uap->pid))
continue;
nfound++;
+ if (p->p_flag & P_WAITING) {
+ (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
+ goto loop;
+ }
+ p->p_flag |= P_WAITING; /* only allow single thread to wait() */
+
if (p->p_stat == SZOMB) {
retval[0] = p->p_pid;
#if COMPAT_43
if (error = copyout((caddr_t)&status,
(caddr_t)uap->status,
sizeof(status))) {
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
}
(error = copyout((caddr_t)p->p_ru,
(caddr_t)uap->rusage,
sizeof (struct rusage)))) {
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
/*
proc_reparent(p, t);
psignal(t, SIGCHLD);
wakeup((caddr_t)t);
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (0);
}
p->p_xstat = 0;
leavepgrp(p);
LIST_REMOVE(p, p_list); /* off zombproc */
LIST_REMOVE(p, p_sibling);
+ p->p_flag &= ~P_WAITING;
FREE_ZONE(p, sizeof *p, M_PROC);
nprocs--;
- q->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (0);
}
if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
sizeof(status));
} else
error = 0;
- q->p_flag &= ~P_WAITING;
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
return (error);
}
+ p->p_flag &= ~P_WAITING;
+ wakeup(&p->p_stat);
}
- if (nfound == 0) {
- q->p_flag &= ~P_WAITING;
+ if (nfound == 0)
return (ECHILD);
- }
+
if (uap->options & WNOHANG) {
retval[0] = 0;
- q->p_flag &= ~P_WAITING;
return (0);
}
- if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)) {
- q->p_flag &= ~P_WAITING;
+ if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue))
return (error);
- }
+
goto loop;
}
child->p_pptr = parent;
}
-kern_return_t
-init_process(void)
/*
* Make the current process an "init" process, meaning
* that it doesn't have a parent, and that it won't be
* gunned down by kill(-1, 0).
*/
+kern_return_t
+init_process(void)
{
register struct proc *p = current_proc();
/*NOTREACHED*/
}
}
+
/*
* Exit: deallocate address space and other resources, change proc state
* to zombie, and unlink proc from allproc and parent's lists. Save exit
vproc_exit(p);
}
-
void
vproc_exit(struct proc *p)
{
thread_act_t th_act_self = current_act();
struct task *task = p->task;
register int i,s;
- struct uthread *ut;
boolean_t funnel_state;
MALLOC_ZONE(p->p_ru, struct rusage *,
vrele(p->p_tracep);
#endif
-
q = p->p_children.lh_first;
if (q) /* only need this if any child is S_ZOMB */
wakeup((caddr_t) initproc);
}
}
-
/*
* Save exit status and final rusage info, adding in child rusage
* info and self times.
/* and now wakeup the parent */
wakeup((caddr_t)p->p_pptr);
-
}
-
thread_act_t *cur_act;
int mask;
kern_return_t kret;
+ int sw_funnel = 0;
if ((u_int)signum >= NSIG || signum == 0)
panic("psignal signal number");
ram_printf(3);
}
#endif /* SIGNAL_DEBUG */
+
+ if (thread_funnel_get() == (funnel_t *)network_flock) {
+ sw_funnel = 1;
+ thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+ }
/*
* We will need the task pointer later. Grab it now to
* check for a zombie process. Also don't send signals
* to kernel internal tasks.
*/
- if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task))
+ if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) {
+ if (sw_funnel)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
return;
+ }
/*
* do not send signals to the process that has the thread
* doing a reboot(). Not doing so will mark that thread aborted
* and can cause IO failures wich will cause data loss.
*/
- if (ISSET(p->p_flag, P_REBOOT))
+ if (ISSET(p->p_flag, P_REBOOT)) {
+ if (sw_funnel)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
return;
+ }
/*
* if the traced process is blocked waiting for
thread_call_func((thread_call_func_t)psignal_pend, p,
FALSE);
}
+ if (sw_funnel)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
return;
}
psigout:
if (withlock)
signal_unlock(p);
+ if (sw_funnel)
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
}
__inline__ void
};
+/* Obsolete types */
#define DLIL_DESC_RAW 1
#define DLIL_DESC_802_2 2
#define DLIL_DESC_802_2_SNAP 3
+/*
+ * DLIL_DESC_RAW - obsolete type, data in variants.bitmask or native_type
+ * if variants.bitmask.proto_id_length, native_type in host
+ * byte order.
+ * DLIL_DESC_802_2 - obsolete, data in variants.desc_802_2
+ * DLIL_DESC_802_2_SNAP - obsolete, data in variants.desc_802_2_SNAP
+ * protocol field in host byte order
+ */
+
+/* Ehernet specific types */
+#define DLIL_DESC_ETYPE2 4
+#define DLIL_DESC_SAP 5
+#define DLIL_DESC_SNAP 6
+/*
+ * DLIL_DESC_ETYPE2 - native_type must point to 2 byte ethernet raw protocol,
+ * variants.native_type_length must be set to 2
+ * DLIL_DESC_SAP - native_type must point to 3 byte SAP protocol
+ * variants.native_type_length must be set to 3
+ * DLIL_DESC_SNAP - native_type must point to 5 byte SNAP protocol
+ * variants.native_type_length must be set to 5
+ *
+ * All protocols must be in Network byte order.
+ *
+ * Future interface families may define more protocol types they know about.
+ * The type implies the offset and context of the protocol data at native_type.
+ * The length of the protocol data specified at native_type must be set in
+ * variants.native_type_length.
+ */
struct dlil_demux_desc {
TAILQ_ENTRY(dlil_demux_desc) next;
- int type;
-
- u_char *native_type;
+
+ int type;
+ u_char *native_type;
+
union {
- struct {
- u_long proto_id_length; /* IN LONGWORDS!!! */
- u_char *proto_id;
- u_char *proto_id_mask;
-
- } bitmask;
-
- struct {
- u_char dsap;
- u_char ssap;
- u_char control_code;
- u_char pad;
- } desc_802_2;
-
- struct {
- u_char dsap;
- u_char ssap;
- u_char control_code;
- u_char org[3];
- u_short protocol_type;
- } desc_802_2_SNAP;
+ /* Structs in this union are obsolete. They exist for binary compatability only */
+ /* Only the native_type_length is used */
+ struct {
+ u_long proto_id_length; /* IN LONGWORDS!!! */
+ u_char *proto_id; /* No longer supported by Ethernet family */
+ u_char *proto_id_mask;
+ } bitmask;
+
+ struct {
+ u_char dsap;
+ u_char ssap;
+ u_char control_code;
+ u_char pad;
+ } desc_802_2;
+
+ struct {
+ u_char dsap; /* Ignored, assumed to be 0xAA */
+ u_char ssap; /* Ignored, assumed to be 0xAA */
+ u_char control_code; /* Ignored, assumed to be 0x03 */
+ u_char org[3];
+ u_short protocol_type; /* In host byte order */
+ } desc_802_2_SNAP;
+
+ /* Length of data pointed to by native_type, must be set correctly */
+ u_int32_t native_type_length;
} variants;
};
short unit_number;
int default_proto; /* 0 or 1 */
dl_input_func input;
- dl_pre_output_func pre_output;
+ dl_pre_output_func pre_output;
dl_event_func event;
dl_offer_func offer;
dl_ioctl_func ioctl;
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
-#include <net/ndrv.h>
#include <netinet/if_ether.h>
#include <sys/socketvar.h>
desc.variants.desc_802_2_SNAP.org[1] = 0x00;
desc.variants.desc_802_2_SNAP.org[2] = 0x07;
desc.variants.desc_802_2_SNAP.protocol_type = 0x809B;
- desc.native_type = (char *) &native;
+ desc.native_type = NULL;
TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next);
reg.interface_family = ifp->if_family;
reg.unit_number = ifp->if_unit;
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
-#include <net/ndrv.h>
#include <netinet/if_ether.h>
/*
#define IFP2AC(IFP) ((struct arpcom *)IFP)
-u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
-
-#define DB_HEADER_SIZE 20
struct en_desc {
- short total_len;
- u_short ethertype;
- u_long dl_tag;
- struct ifnet *ifp;
- struct if_proto *proto;
- u_long proto_id_length;
- u_long proto_id_data[8]; /* probably less - proto-id and bitmasks */
+ u_int16_t type; /* Type of protocol stored in data */
+ struct if_proto *proto; /* Protocol structure */
+ u_long data[2]; /* Protocol data */
};
-
-#define LITMUS_SIZE 16
-#define ETHER_DESC_BLK_SIZE 50
+
+#define ETHER_DESC_BLK_SIZE (10)
#define MAX_INTERFACES 50
/*
*/
struct ether_desc_blk_str {
- u_long n_blocks;
- u_long *block_ptr;
-};
-
-struct dl_es_at_entry
-{
- struct ifnet *ifp;
- u_long dl_tag;
- int ref_count;
+ u_long n_max_used;
+ u_long n_count;
+ struct en_desc *block_ptr;
};
static struct ether_desc_blk_str ether_desc_blk[MAX_INTERFACES];
-static u_long litmus_mask[LITMUS_SIZE];
-static u_long litmus_length = 0;
-
-
-/*
- * Temp static for protocol registration XXX
- */
-
-#define MAX_EN_COUNT 30
-
-static struct dl_es_at_entry en_at_array[MAX_EN_COUNT];
-
-/*
- * This could be done below in-line with heavy casting, but the pointer arithmetic is
- * prone to error.
- */
-
-static
-int desc_in_bounds(block, current_ptr, offset_length)
- u_int block;
- char *current_ptr;
- u_long offset_length;
-{
- u_long end_of_block;
- u_long current_ptr_tmp;
-
- current_ptr_tmp = (u_long) current_ptr;
- end_of_block = (u_long) ether_desc_blk[block].block_ptr;
- end_of_block += (ETHER_DESC_BLK_SIZE * ether_desc_blk[block].n_blocks);
- if ((current_ptr_tmp + offset_length) < end_of_block)
- return 1;
- else
- return 0;
-}
/*
 * Release every descriptor entry owned by this protocol (there may be
 * several).  An entry is released by setting its type to 0.  When the
 * highest in-use slot is released, n_max_used shrinks with it.
 * Returns non-zero if at least one entry was released.
 */
static
int ether_del_proto(struct if_proto *proto, u_long dl_tag)
{
	struct ether_desc_blk_str *blk = &ether_desc_blk[proto->ifp->family_cookie];
	struct en_desc *table = blk->block_ptr;
	u_long slot;
	int found = 0;

	/* Scan from the top so trailing frees can lower n_max_used */
	for (slot = blk->n_max_used; slot > 0; slot--) {
		if (table[slot - 1].proto != proto)
			continue;

		found = 1;
		table[slot - 1].type = 0;

		/* Freed the current top slot - shrink the scan bound */
		if (slot == blk->n_max_used)
			blk->n_max_used--;
	}

	return found;
}
-static
-int ether_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag)
+static int
+ether_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag)
{
char *current_ptr;
struct dlil_demux_desc *desc;
- u_long id_length; /* IN LONGWORDS!!! */
struct en_desc *ed;
+ struct en_desc *last;
u_long *bitmask;
u_long *proto_id;
- int i;
+ u_long i;
short total_length;
u_long block_count;
u_long *tmp;
- TAILQ_FOREACH(desc, desc_head, next) {
- switch (desc->type)
- {
- case DLIL_DESC_RAW:
- id_length = desc->variants.bitmask.proto_id_length;
- break;
-
- case DLIL_DESC_802_2:
- id_length = 1;
- break;
-
- case DLIL_DESC_802_2_SNAP:
- id_length = 2;
- break;
-
- default:
- return EINVAL;
- }
-
-restart:
- block_count = ether_desc_blk[proto->ifp->family_cookie].n_blocks;
- current_ptr = (char *) ether_desc_blk[proto->ifp->family_cookie].block_ptr;
- ed = (struct en_desc *) current_ptr;
- total_length = ((id_length << 2) * 2) + DB_HEADER_SIZE;
-
- while ((ed->total_len) && (desc_in_bounds(proto->ifp->family_cookie,
- current_ptr, total_length))) {
- if ((ed->dl_tag == 0) && (total_length <= ed->total_len))
- break;
- else
- current_ptr += *(short *)current_ptr;
-
- ed = (struct en_desc *) current_ptr;
- }
-
- if (!desc_in_bounds(proto->ifp->family_cookie, current_ptr, total_length)) {
-
- tmp = _MALLOC((ETHER_DESC_BLK_SIZE * (block_count + 1)),
- M_IFADDR, M_WAITOK);
- if (tmp == 0) {
- /*
- * Remove any previous descriptors set in the call.
- */
- ether_del_proto(proto, dl_tag);
- return ENOMEM;
- }
-
- bzero(tmp, ETHER_DESC_BLK_SIZE * (block_count + 1));
- bcopy(ether_desc_blk[proto->ifp->family_cookie].block_ptr,
- tmp, (ETHER_DESC_BLK_SIZE * block_count));
- FREE(ether_desc_blk[proto->ifp->family_cookie].block_ptr, M_IFADDR);
- ether_desc_blk[proto->ifp->family_cookie].n_blocks = block_count + 1;
- ether_desc_blk[proto->ifp->family_cookie].block_ptr = tmp;
- goto restart;
- }
-
- if (ed->total_len == 0)
- ed->total_len = total_length;
- ed->ethertype = *((u_short *) desc->native_type);
-
- ed->dl_tag = dl_tag;
- ed->proto = proto;
- ed->proto_id_length = id_length;
- ed->ifp = proto->ifp;
-
- switch (desc->type)
- {
- case DLIL_DESC_RAW:
- bcopy(desc->variants.bitmask.proto_id, &ed->proto_id_data[0], (id_length << 2) );
- bcopy(desc->variants.bitmask.proto_id_mask, &ed->proto_id_data[id_length],
- (id_length << 2));
- break;
-
- case DLIL_DESC_802_2:
- ed->proto_id_data[0] = 0;
- bcopy(&desc->variants.desc_802_2, &ed->proto_id_data[0], 3);
- ed->proto_id_data[1] = 0xffffff00;
- break;
-
- case DLIL_DESC_802_2_SNAP:
- /* XXX Add verification of fixed values here */
-
- ed->proto_id_data[0] = 0;
- ed->proto_id_data[1] = 0;
- bcopy(&desc->variants.desc_802_2_SNAP, &ed->proto_id_data[0], 8);
- ed->proto_id_data[2] = 0xffffffff;
- ed->proto_id_data[3] = 0xffffffff;;
- break;
- }
-
- if (id_length) {
- proto_id = (u_long *) &ed->proto_id_data[0];
- bitmask = (u_long *) &ed->proto_id_data[id_length];
- for (i=0; i < (id_length); i++) {
- litmus_mask[i] &= bitmask[i];
- litmus_mask[i] &= proto_id[i];
- }
- if (id_length > litmus_length)
- litmus_length = id_length;
- }
- }
-
- return 0;
+ TAILQ_FOREACH(desc, desc_head, next) {
+ switch (desc->type) {
+ /* These types are supported */
+ /* Top three are preferred */
+ case DLIL_DESC_ETYPE2:
+ if (desc->variants.native_type_length != 2)
+ return EINVAL;
+ break;
+
+ case DLIL_DESC_SAP:
+ if (desc->variants.native_type_length != 3)
+ return EINVAL;
+ break;
+
+ case DLIL_DESC_SNAP:
+ if (desc->variants.native_type_length != 5)
+ return EINVAL;
+ break;
+
+ case DLIL_DESC_802_2:
+ case DLIL_DESC_802_2_SNAP:
+ break;
+
+ case DLIL_DESC_RAW:
+ if (desc->variants.bitmask.proto_id_length == 0)
+ break;
+ /* else fall through, bitmask variant not supported */
+
+ default:
+ ether_del_proto(proto, dl_tag);
+ return EINVAL;
+ }
+
+ restart:
+ ed = ether_desc_blk[proto->ifp->family_cookie].block_ptr;
+
+ /* Find a free entry */
+ for (i = 0; i < ether_desc_blk[proto->ifp->family_cookie].n_count; i++) {
+ if (ed[i].type == 0) {
+ break;
+ }
+ }
+
+ if (i >= ether_desc_blk[proto->ifp->family_cookie].n_count) {
+ u_long new_count = ETHER_DESC_BLK_SIZE +
+ ether_desc_blk[proto->ifp->family_cookie].n_count;
+ tmp = _MALLOC((new_count * (sizeof(*ed))), M_IFADDR, M_WAITOK);
+ if (tmp == 0) {
+ /*
+ * Remove any previous descriptors set in the call.
+ */
+ ether_del_proto(proto, dl_tag);
+ return ENOMEM;
+ }
+
+ bzero(tmp, new_count * sizeof(*ed));
+ bcopy(ether_desc_blk[proto->ifp->family_cookie].block_ptr,
+ tmp, ether_desc_blk[proto->ifp->family_cookie].n_count * sizeof(*ed));
+ FREE(ether_desc_blk[proto->ifp->family_cookie].block_ptr, M_IFADDR);
+ ether_desc_blk[proto->ifp->family_cookie].n_count = new_count;
+ ether_desc_blk[proto->ifp->family_cookie].block_ptr = (struct en_desc*)tmp;
+ }
+
+ /* Bump n_max_used if appropriate */
+ if (i + 1 > ether_desc_blk[proto->ifp->family_cookie].n_max_used) {
+ ether_desc_blk[proto->ifp->family_cookie].n_max_used = i + 1;
+ }
+
+ ed[i].proto = proto;
+ ed[i].data[0] = 0;
+ ed[i].data[1] = 0;
+
+ switch (desc->type) {
+ case DLIL_DESC_RAW:
+ /* 2 byte ethernet raw protocol type is at native_type */
+ /* protocol is not in network byte order */
+ ed[i].type = DLIL_DESC_ETYPE2;
+ ed[i].data[0] = htons(*(u_int16_t*)desc->native_type);
+ break;
+
+ case DLIL_DESC_ETYPE2:
+ /* 2 byte ethernet raw protocol type is at native_type */
+ /* prtocol must be in network byte order */
+ ed[i].type = DLIL_DESC_ETYPE2;
+ ed[i].data[0] = *(u_int16_t*)desc->native_type;
+ break;
+
+ case DLIL_DESC_802_2:
+ ed[i].type = DLIL_DESC_SAP;
+ ed[i].data[0] = *(u_int32_t*)&desc->variants.desc_802_2;
+ ed[i].data[0] &= htonl(0xFFFFFF00);
+ break;
+
+ case DLIL_DESC_SAP:
+ ed[i].type = DLIL_DESC_SAP;
+ bcopy(desc->native_type, &ed[i].data[0], 3);
+ break;
+
+ case DLIL_DESC_802_2_SNAP:
+ ed[i].type = DLIL_DESC_SNAP;
+ desc->variants.desc_802_2_SNAP.protocol_type =
+ htons(desc->variants.desc_802_2_SNAP.protocol_type);
+ bcopy(&desc->variants.desc_802_2_SNAP, &ed[i].data[0], 8);
+ ed[i].data[0] &= htonl(0x000000FF);
+ desc->variants.desc_802_2_SNAP.protocol_type =
+ ntohs(desc->variants.desc_802_2_SNAP.protocol_type);
+ break;
+
+ case DLIL_DESC_SNAP: {
+ u_int8_t* pDest = ((u_int8_t*)&ed[i].data[0]) + 3;
+ ed[i].type = DLIL_DESC_SNAP;
+ bcopy(&desc->native_type, pDest, 5);
+ }
+ break;
+ }
+ }
+
+ return 0;
}
}
-
-
-
int ether_demux(ifp, m, frame_header, proto)
struct ifnet *ifp;
struct mbuf *m;
{
register struct ether_header *eh = (struct ether_header *)frame_header;
- u_short ether_type;
- char *current_ptr = (char *) ether_desc_blk[ifp->family_cookie].block_ptr;
- struct dlil_demux_desc *desc;
- register u_long temp;
- u_long *data;
- register struct if_proto *ifproto;
- u_long i;
- struct en_desc *ed;
-
-
+ u_short ether_type = eh->ether_type;
+ u_int16_t type;
+ u_int8_t *data;
+ u_long i = 0;
+ u_long max = ether_desc_blk[ifp->family_cookie].n_max_used;
+ struct en_desc *ed = ether_desc_blk[ifp->family_cookie].block_ptr;
+ u_int32_t extProto1 = 0;
+ u_int32_t extProto2 = 0;
+
if (eh->ether_dhost[0] & 1) {
- if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost,
- sizeof(etherbroadcastaddr)) == 0)
- m->m_flags |= M_BCAST;
- else
- m->m_flags |= M_MCAST;
+ /* Check for broadcast */
+ if (*(u_int32_t*)eh->ether_dhost == 0xFFFFFFFF &&
+ *(u_int16_t*)(eh->ether_dhost + sizeof(u_int32_t)) == 0xFFFF)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
}
-
- ether_type = ntohs(eh->ether_type);
-
+
+ data = mtod(m, u_int8_t*);
+
+ /*
+ * Determine the packet's protocol type and stuff the protocol into
+ * longs for quick compares.
+ */
+
+ if (ntohs(ether_type) < 1500) {
+ extProto1 = *(u_int32_t*)data;
+
+ // SAP or SNAP
+ if ((extProto1 & htonl(0xFFFFFF00)) == htonl(0xAAAA0300)) {
+ // SNAP
+ type = DLIL_DESC_SNAP;
+ extProto2 = *(u_int32_t*)(data + sizeof(u_int32_t));
+ extProto1 &= htonl(0x000000FF);
+ } else {
+ type = DLIL_DESC_SAP;
+ extProto1 &= htonl(0xFFFFFF00);
+ }
+ } else {
+ type = DLIL_DESC_ETYPE2;
+ }
+
/*
* Search through the connected protocols for a match.
*/
-
-
- data = mtod(m, u_long *);
- ed = (struct en_desc *) current_ptr;
- while (desc_in_bounds(ifp->family_cookie, current_ptr, DB_HEADER_SIZE)) {
- if (ed->total_len == 0)
- break;
-
- if ((ed->dl_tag != 0) && (ed->ifp == ifp) &&
- ((ed->ethertype == ntohs(eh->ether_type)) || (ed->ethertype == 0))) {
- if (ed->proto_id_length) {
- for (i=0; i < (ed->proto_id_length); i++) {
- temp = ntohs(data[i]) & ed->proto_id_data[ed->proto_id_length + i];
- if ((temp ^ ed->proto_id_data[i]))
- break;
- }
-
- if (i >= (ed->proto_id_length)) {
- *proto = ed->proto;
- return 0;
- }
- }
- else {
- *proto = ed->proto;
- return 0;
- }
- }
- current_ptr += ed->total_len;
- ed = (struct en_desc *) current_ptr;
+
+ switch (type) {
+ case DLIL_DESC_ETYPE2:
+ for (i = 0; i < max; i++) {
+ if ((ed[i].type == type) && (ed[i].data[0] == ether_type)) {
+ *proto = ed[i].proto;
+ return 0;
+ }
+ }
+ break;
+
+ case DLIL_DESC_SAP:
+ for (i = 0; i < max; i++) {
+ if ((ed[i].type == type) && (ed[i].data[0] == extProto1)) {
+ *proto = ed[i].proto;
+ return 0;
+ }
+ }
+ break;
+
+ case DLIL_DESC_SNAP:
+ for (i = 0; i < max; i++) {
+ if ((ed[i].type == type) && (ed[i].data[0] == extProto1) &&
+ (ed[i].data[1] == extProto2)) {
+ *proto = ed[i].proto;
+ return 0;
+ }
+ }
+ break;
}
-
-/*
- kprintf("ether_demux - No match for <%x><%x><%x><%x><%x><%x><%x<%x>\n",
- eh->ether_type,data[0], data[1], data[2], data[3], data[4],data[5],data[6]);
-*/
-
+
return ENOENT;
}
if ((ifp->if_flags & IFF_SIMPLEX) &&
((*m)->m_flags & M_LOOP)) {
if (lo_dlt == 0)
- dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dlt);
+ dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dlt);
if (lo_dlt) {
- if ((*m)->m_flags & M_BCAST) {
- struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL);
- if (n != NULL)
- dlil_output(lo_dlt, n, 0, ndest, 0);
- }
- else
- {
- if (bcmp(edst, ac->ac_enaddr, ETHER_ADDR_LEN) == 0) {
- dlil_output(lo_dlt, *m, 0, ndest, 0);
- return EJUSTRETURN;
- }
- }
+ if ((*m)->m_flags & M_BCAST) {
+ struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL);
+ if (n != NULL)
+ dlil_output(lo_dlt, n, 0, ndest, 0);
+ }
+ else
+ {
+ if (bcmp(edst, ac->ac_enaddr, ETHER_ADDR_LEN) == 0) {
+ dlil_output(lo_dlt, *m, 0, ndest, 0);
+ return EJUSTRETURN;
+ }
+ }
}
}
-
-
+
+
/*
* Add local net header. If no space in first mbuf,
* allocate another.
ifp->if_event = 0;
for (i=0; i < MAX_INTERFACES; i++)
- if (ether_desc_blk[i].n_blocks == 0)
- break;
+ if (ether_desc_blk[i].n_count == 0)
+ break;
if (i == MAX_INTERFACES)
- return ENOMEM;
+ return ENOMEM;
- ether_desc_blk[i].block_ptr = _MALLOC(ETHER_DESC_BLK_SIZE, M_IFADDR, M_WAITOK);
+ ether_desc_blk[i].block_ptr = _MALLOC(ETHER_DESC_BLK_SIZE * sizeof(struct en_desc),
+ M_IFADDR, M_WAITOK);
if (ether_desc_blk[i].block_ptr == 0)
- return ENOMEM;
+ return ENOMEM;
- ether_desc_blk[i].n_blocks = 1;
- bzero(ether_desc_blk[i].block_ptr, ETHER_DESC_BLK_SIZE);
+ ether_desc_blk[i].n_count = ETHER_DESC_BLK_SIZE;
+ bzero(ether_desc_blk[i].block_ptr, ETHER_DESC_BLK_SIZE * sizeof(struct en_desc));
ifp->family_cookie = i;
int ether_del_if(struct ifnet *ifp)
{
if ((ifp->family_cookie < MAX_INTERFACES) &&
- (ether_desc_blk[ifp->family_cookie].n_blocks)) {
- FREE(ether_desc_blk[ifp->family_cookie].block_ptr, M_IFADDR);
- ether_desc_blk[ifp->family_cookie].n_blocks = 0;
- return 0;
+ (ether_desc_blk[ifp->family_cookie].n_count))
+ {
+ FREE(ether_desc_blk[ifp->family_cookie].block_ptr, M_IFADDR);
+ ether_desc_blk[ifp->family_cookie].block_ptr = NULL;
+ ether_desc_blk[ifp->family_cookie].n_count = 0;
+ ether_desc_blk[ifp->family_cookie].n_max_used = 0;
+ return 0;
}
else
- return ENOENT;
+ return ENOENT;
}
u_char *e_addr;
- switch (command)
- {
- case SIOCRSLVMULTI:
- switch(rsreq->sa->sa_family)
- {
- case AF_UNSPEC:
- /* AppleTalk uses AF_UNSPEC for multicast registration.
- * No mapping needed. Just check that it's a valid MC address.
- */
- e_addr = &rsreq->sa->sa_data[0];
- if ((e_addr[0] & 1) != 1)
- return EADDRNOTAVAIL;
- *rsreq->llsa = 0;
- return EJUSTRETURN;
-
-
- case AF_LINK:
- /*
- * No mapping needed. Just check that it's a valid MC address.
- */
- sdl = (struct sockaddr_dl *)rsreq->sa;
- e_addr = LLADDR(sdl);
- if ((e_addr[0] & 1) != 1)
- return EADDRNOTAVAIL;
- *rsreq->llsa = 0;
- return EJUSTRETURN;
-
- default:
- return EAFNOSUPPORT;
- }
-
- default:
- return EOPNOTSUPP;
+ switch (command) {
+ case SIOCRSLVMULTI:
+ switch(rsreq->sa->sa_family) {
+ case AF_UNSPEC:
+ /* AppleTalk uses AF_UNSPEC for multicast registration.
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ e_addr = &rsreq->sa->sa_data[0];
+ if ((e_addr[0] & 1) != 1)
+ return EADDRNOTAVAIL;
+ *rsreq->llsa = 0;
+ return EJUSTRETURN;
+
+
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)rsreq->sa;
+ e_addr = LLADDR(sdl);
+ if ((e_addr[0] & 1) != 1)
+ return EADDRNOTAVAIL;
+ *rsreq->llsa = 0;
+ return EJUSTRETURN;
+
+ default:
+ return EAFNOSUPPORT;
+ }
+
+ default:
+ return EOPNOTSUPP;
}
}
struct dlil_ifmod_reg_str ifmod_reg;
if (ivedonethis)
- return 0;
+ return 0;
ivedonethis = 1;
ifmod_reg.shutdown = ether_shutdown;
if (dlil_reg_if_modules(APPLE_IF_FAM_ETHERNET, &ifmod_reg)) {
- printf("WARNING: ether_family_init -- Can't register if family modules\n");
- return EIO;
+ printf("WARNING: ether_family_init -- Can't register if family modules\n");
+ return EIO;
}
- for (i=0; i < (LITMUS_SIZE/4); i++)
- litmus_mask[i] = 0xffffffff;
-
for (i=0; i < MAX_INTERFACES; i++)
- ether_desc_blk[i].n_blocks = 0;
-
- for (i=0; i < MAX_EN_COUNT; i++)
- en_at_array[i].ifp = 0;
+ ether_desc_blk[i].n_count = 0;
return 0;
}
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
-#include <net/ndrv.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
-#include <net/ndrv.h>
#if INET
#include <netinet/in.h>
* strange protocol support. One of the main ones will be the
* BlueBox/Classic Shared IP Address support.
*/
+#include <mach/mach_types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <kern/queue.h>
-#include <net/if.h>
+#include <net/ndrv.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
-#include <net/dlil.h>
-#include "ndrv.h"
+#include <net/ndrv_var.h>
#if INET
#include <netinet/in.h>
int ndrv_do_detach(struct ndrv_cb *);
int ndrv_do_disconnect(struct ndrv_cb *);
+struct ndrv_cb *ndrv_find_tag(unsigned int);
+void ndrv_read_event(struct socket* inSo, caddr_t ref, int waitf);
+int ndrv_setspec(struct ndrv_cb *np, struct sockopt *sopt);
+int ndrv_delspec(struct ndrv_cb *);
+int ndrv_to_dlil_demux(struct ndrv_demux_desc* ndrv, struct dlil_demux_desc* dlil);
+void ndrv_handle_ifp_detach(u_long family, short unit);
unsigned long ndrv_sendspace = NDRVSNDQ;
unsigned long ndrv_recvspace = NDRVRCVQ;
struct ndrv_cb ndrvl; /* Head of controlblock list */
-/* To handle input, need to map tag to ndrv_cb */
-struct ndrv_tag_map
-{ unsigned int tm_tag; /* Tag in use */
- struct ndrv_cb *tm_np; /* Owning device */
- struct dlil_demux_desc *tm_dm; /* Our local copy */
-};
-
-struct ndrv_tag_map *ndrv_tags;
-#define TAG_MAP_COUNT 10
-int tag_map_count;
-
struct domain ndrvdomain;
-extern struct protosw ndrvsw[];
+struct protosw ndrvsw;
+static struct socket* ndrv_so;
/*
void
ndrv_init()
{
+ int retval;
+ struct kev_request kev_request;
+
ndrvl.nd_next = ndrvl.nd_prev = &ndrvl;
+
+ /* Create a PF_SYSTEM socket so we can listen for events */
+ retval = socreate(PF_SYSTEM, &ndrv_so, SOCK_RAW, SYSPROTO_EVENT);
+ if (retval != 0 || ndrv_so == NULL)
+ retval = KERN_FAILURE;
+
+ /* Install a callback function for the socket */
+ ndrv_so->so_rcv.sb_flags |= SB_NOTIFY|SB_UPCALL;
+ ndrv_so->so_upcall = ndrv_read_event;
+ ndrv_so->so_upcallarg = NULL;
+
+ /* Configure the socket to receive the events we're interested in */
+ kev_request.vendor_code = KEV_VENDOR_APPLE;
+ kev_request.kev_class = KEV_NETWORK_CLASS;
+ kev_request.kev_subclass = KEV_DL_SUBCLASS;
+ retval = ndrv_so->so_proto->pr_usrreqs->pru_control(ndrv_so, SIOCSKEVFILT, (caddr_t)&kev_request, 0, 0);
+ if (retval != 0)
+ {
+ /*
+ * We will not get attaching or detaching events in this case.
+ * We should probably prevent any sockets from binding so we won't
+ * panic later if the interface goes away.
+ */
+ log(LOG_WARNING, "PF_NDRV: ndrv_init - failed to set event filter (%d)",
+ retval);
+ }
}
/*
*/
int
ndrv_output(register struct mbuf *m, register struct socket *so)
-{ register struct ndrv_cb *np = sotondrvcb(so);
+{
+ register struct ndrv_cb *np = sotondrvcb(so);
register struct ifnet *ifp = np->nd_if;
- int s, error;
extern void kprintf(const char *, ...);
+ int result = 0;
#if NDRV_DEBUG
kprintf("NDRV output: %x, %x, %x\n", m, so, np);
return(EINVAL);
/*
- * Can't do multicast accounting because we don't know
- * (a) if our interface does multicast; and
- * (b) what a multicast address looks like
- */
- s = splimp();
-
- /*
- * Can't call DLIL to do the job - we don't have a tag
- * and we aren't really a protocol
- */
-
- (*ifp->if_output)(ifp, m);
- splx(s);
- return (0);
-}
-
+ * Call DLIL if we can. DLIL is much safer than calling the
+ * ifp directly.
+ */
+ if (np->nd_tag != 0)
+ result = dlil_output(np->nd_tag, m, (caddr_t)NULL,
+ (struct sockaddr*)NULL, 1);
+ else if (np->nd_send_tag != 0)
+ result = dlil_output(np->nd_send_tag, m, (caddr_t)NULL,
+ (struct sockaddr*)NULL, 1);
+ else
+ result = ENXIO;
+ return (result);
+}
+
+/* Our input routine called from DLIL */
int
ndrv_input(struct mbuf *m,
char *frame_header,
struct ifnet *ifp,
u_long dl_tag,
int sync_ok)
-{ int s;
+{
struct socket *so;
struct sockaddr_dl ndrvsrc = {sizeof (struct sockaddr_dl), AF_NDRV};
register struct ndrv_cb *np;
- extern struct ndrv_cb *ndrv_find_tag(unsigned int);
- /* move packet from if queue to socket */
+ /* move packet from if queue to socket */
/* Should be media-independent */
- ndrvsrc.sdl_type = IFT_ETHER;
- ndrvsrc.sdl_nlen = 0;
- ndrvsrc.sdl_alen = 6;
- ndrvsrc.sdl_slen = 0;
- bcopy(frame_header, &ndrvsrc.sdl_data, 6);
+ ndrvsrc.sdl_type = IFT_ETHER;
+ ndrvsrc.sdl_nlen = 0;
+ ndrvsrc.sdl_alen = 6;
+ ndrvsrc.sdl_slen = 0;
+ bcopy(frame_header, &ndrvsrc.sdl_data, 6);
- s = splnet();
np = ndrv_find_tag(dl_tag);
if (np == NULL)
- { splx(s);
+ {
return(ENOENT);
}
so = np->nd_socket;
+ /* prepend the frame header */
+ m = m_prepend(m, ifp->if_data.ifi_hdrlen, M_NOWAIT);
+ if (m == NULL)
+ return EJUSTRETURN;
+ bcopy(frame_header, m->m_data, ifp->if_data.ifi_hdrlen);
if (sbappendaddr(&(so->so_rcv), (struct sockaddr *)&ndrvsrc,
m, (struct mbuf *)0) == 0)
- { /* yes, sbappendaddr returns zero if the sockbuff is full... */
- splx(s);
+ {
+ /* yes, sbappendaddr returns zero if the sockbuff is full... */
+ /* caller will free m */
return(ENOMEM);
} else
sorwakeup(so);
- splx(s);
return(0);
}
-int
-ndrv_ioctl(unsigned long dl_tag,
- struct ifnet *ifp,
- unsigned long command,
- caddr_t data)
-{
- if (ifp)
- return((*ifp->if_ioctl)(ifp, command, data));
-}
-
int
ndrv_control(struct socket *so, u_long cmd, caddr_t data,
struct ifnet *ifp, struct proc *p)
*/
int
ndrv_attach(struct socket *so, int proto, struct proc *p)
-{ int error;
+{
+ int error;
register struct ndrv_cb *np = sotondrvcb(so);
if ((so->so_state & SS_PRIV) == 0)
MALLOC(np, struct ndrv_cb *, sizeof(*np), M_PCB, M_WAITOK);
if (np == NULL)
return (ENOMEM);
+ so->so_pcb = (caddr_t)np;
+ bzero(np, sizeof(*np));
#if NDRV_DEBUG
kprintf("NDRV attach: %x, %x, %x\n", so, proto, np);
#endif
- if ((so->so_pcb = (caddr_t)np))
- bzero(np, sizeof(*np));
- else
- return(ENOBUFS);
if ((error = soreserve(so, ndrv_sendspace, ndrv_recvspace)))
return(error);
TAILQ_INIT(&np->nd_dlist);
np->nd_socket = so;
np->nd_proto.sp_family = so->so_proto->pr_domain->dom_family;
np->nd_proto.sp_protocol = proto;
+ np->nd_if = NULL;
+ np->nd_tag = 0;
+ np->nd_family = 0;
+ np->nd_unit = 0;
insque((queue_t)np, (queue_t)&ndrvl);
return(0);
}
int ndrv_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
register struct ndrv_cb *np = sotondrvcb(so);
+ int result = 0;
if (np == 0)
return EINVAL;
if (np->nd_faddr)
return EISCONN;
-
- bcopy((caddr_t) nam, (caddr_t) np->nd_faddr, sizeof(struct sockaddr_ndrv));
+
+ /* Allocate memory to store the remote address */
+ MALLOC(np->nd_faddr, struct sockaddr_ndrv*,
+ nam->sa_len, M_IFADDR, M_WAITOK);
+ if (result != 0)
+ return result;
+ if (np->nd_faddr == NULL)
+ return ENOMEM;
+
+ bcopy((caddr_t) nam, (caddr_t) np->nd_faddr, nam->sa_len);
soisconnected(so);
return 0;
}
/*
* This is the "driver open" hook - we 'bind' to the
* named driver.
- * Here's where we latch onto the driver and make it ours.
+ * Here's where we latch onto the driver.
*/
int
ndrv_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
-{ register struct sockaddr_ndrv *sa = (struct sockaddr_ndrv *) nam;
+{
+ register struct sockaddr_ndrv *sa = (struct sockaddr_ndrv *) nam;
register char *dname;
register struct ndrv_cb *np;
register struct ifnet *ifp;
extern int name_cmp(struct ifnet *, char *);
+ int result;
if TAILQ_EMPTY(&ifnet)
return(EADDRNOTAVAIL); /* Quick sanity check */
if (ifp == NULL)
return(EADDRNOTAVAIL);
+
+ /*
+ * Loopback demuxing doesn't work with PF_NDRV.
+ * The first 4 bytes of the packet must be the
+ * protocol ptr. Can't get that from userland.
+ */
+ if (ifp->if_family == APPLE_IF_FAM_LOOPBACK)
+ return (ENOTSUP);
+
+ if ((dlil_find_dltag(ifp->if_family, ifp->if_unit,
+ PF_NDRV, &np->nd_send_tag) != 0) &&
+ (ifp->if_family != APPLE_IF_FAM_PPP))
+ {
+ /* NDRV isn't registered on this interface, lets change that */
+ struct dlil_proto_reg_str ndrv_proto;
+ int result = 0;
+ bzero(&ndrv_proto, sizeof(ndrv_proto));
+ TAILQ_INIT(&ndrv_proto.demux_desc_head);
+
+ ndrv_proto.interface_family = ifp->if_family;
+ ndrv_proto.protocol_family = PF_NDRV;
+ ndrv_proto.unit_number = ifp->if_unit;
+
+ result = dlil_attach_protocol(&ndrv_proto, &np->nd_send_tag);
+
+ /*
+ * If the interface does not allow PF_NDRV to attach, we will
+	 * respect its wishes. Sending will be disabled. No error is
+ * returned because the client may later attach a real protocol
+ * that the interface may accept.
+ */
+ if (result != 0)
+ np->nd_send_tag = 0;
+ }
+
np->nd_if = ifp;
+ np->nd_family = ifp->if_family;
+ np->nd_unit = ifp->if_unit;
+
return(0);
}
return 0;
}
+/*
+ * Accessor function
+ */
+struct ifnet*
+ndrv_get_ifp(caddr_t ndrv_pcb)
+{
+ struct ndrv_cb* np = (struct ndrv_cb*)ndrv_pcb;
+
+#if DEBUG
+ {
+ struct ndrv_cb* temp = ndrvl.nd_next;
+ /* Verify existence of pcb */
+ for (temp = ndrvl.nd_next; temp != &ndrvl; temp = temp->nd_next)
+ {
+ if (temp == np)
+ break;
+ }
+
+ if (temp != np)
+ {
+ log(LOG_WARNING, "PF_NDRV: ndrv_get_ifp called with invalid ndrv_cb!");
+ return NULL;
+ }
+ }
+#endif
+
+ return np->nd_if;
+}
+
/*
* Mark the connection as being incapable of further input.
*/
int
ndrv_ctloutput(struct socket *so, struct sockopt *sopt)
-{ register struct ndrv_cb *np = sotondrvcb(so);
- struct ndrv_descr nd;
- int count = 0, error = 0;
- int ndrv_getspec(struct ndrv_cb *,
- struct sockopt *,
- struct ndrv_descr *);
- int ndrv_setspec(struct ndrv_cb *, struct ndrv_descr *);
- int ndrv_delspec(struct ndrv_cb *, struct ndrv_descr *);
-
- if (sopt->sopt_name != NDRV_DMXSPECCNT)
- error = sooptcopyin(sopt, &nd, sizeof nd, sizeof nd);
- if (error == 0)
- { switch(sopt->sopt_name)
- { case NDRV_DMXSPEC: /* Get/Set(Add) spec list */
- if (sopt->sopt_dir == SOPT_GET)
- error = ndrv_getspec(np, sopt, &nd);
- else
- error = ndrv_setspec(np, &nd);
- break;
- case NDRV_DELDMXSPEC: /* Delete specified specs */
- error = ndrv_delspec(np, &nd);
- break;
- case NDRV_DMXSPECCNT: /* How many are in the list */
- count = np->nd_descrcnt;
- error = sooptcopyout(sopt, &count, sizeof count);
- break;
- }
- }
+{
+ register struct ndrv_cb *np = sotondrvcb(so);
+ int error = 0;
+
+ switch(sopt->sopt_name)
+ {
+ case NDRV_DELDMXSPEC: /* Delete current spec */
+ /* Verify no parameter was passed */
+ if (sopt->sopt_val != NULL || sopt->sopt_valsize != 0) {
+ /*
+ * We don't support deleting a specific demux, it's
+ * all or nothing.
+ */
+ return EINVAL;
+ }
+ error = ndrv_delspec(np);
+ break;
+ case NDRV_SETDMXSPEC: /* Set protocol spec */
+ error = ndrv_setspec(np, sopt);
+ break;
+ default:
+ error = ENOTSUP;
+ }
#ifdef NDRV_DEBUG
log(LOG_WARNING, "NDRV CTLOUT: %x returns %d\n", sopt->sopt_name,
error);
int
ndrv_do_detach(register struct ndrv_cb *np)
-{ register struct socket *so = np->nd_socket;
- int ndrv_dump_descr(struct ndrv_cb *);
+{
+ struct ndrv_cb* cur_np = NULL;
+ struct socket *so = np->nd_socket;
+ struct ndrv_multicast* next;
+ int error;
#if NDRV_DEBUG
kprintf("NDRV detach: %x, %x\n", so, np);
#endif
- if (!TAILQ_EMPTY(&np->nd_dlist))
- ndrv_dump_descr(np);
-
+ if (np->nd_tag != 0)
+ {
+ error = dlil_detach_protocol(np->nd_tag);
+ if (error)
+ {
+ log(LOG_WARNING, "NDRV ndrv_do_detach: error %d removing dl_tag %d",
+ error, np->nd_tag);
+ return error;
+ }
+ }
+
+ /* Remove from the linked list of control blocks */
remque((queue_t)np);
+
+ if (np->nd_send_tag != 0)
+ {
+ /* Check if this is the last socket attached to this interface */
+ for (cur_np = ndrvl.nd_next; cur_np != &ndrvl; cur_np = cur_np->nd_next)
+ {
+ if (cur_np->nd_family == np->nd_family &&
+ cur_np->nd_unit == np->nd_unit)
+ {
+ break;
+ }
+ }
+
+ /* If there are no other interfaces, detach PF_NDRV from the interface */
+ if (cur_np == &ndrvl)
+ {
+ dlil_detach_protocol(np->nd_send_tag);
+ }
+ }
+
FREE((caddr_t)np, M_PCB);
so->so_pcb = 0;
sofree(so);
- return(0);
+ return error;
}
int
kprintf("NDRV disconnect: %x\n", np);
#endif
if (np->nd_faddr)
- { m_freem(dtom(np->nd_faddr));
+ {
+ FREE(np->nd_faddr, M_IFADDR);
np->nd_faddr = 0;
}
if (np->nd_socket->so_state & SS_NOFDREF)
*/
void
ndrv_flushq(register struct ifqueue *q)
-{ register struct mbuf *m;
- register int s;
+{
+ register struct mbuf *m;
for (;;)
- { s = splimp();
+ {
IF_DEQUEUE(q, m);
if (m == NULL)
break;
IF_DROP(q);
- splx(s);
if (m)
m_freem(m);
}
- splx(s);
}
int
-ndrv_getspec(struct ndrv_cb *np,
- struct sockopt *sopt,
- struct ndrv_descr *nd)
-{ struct dlil_demux_desc *mp, *mp1;
- int i, k, error = 0;
-
- /* Compute # structs to copy */
- i = k = min(np->nd_descrcnt,
- (nd->nd_len / sizeof (struct dlil_demux_desc)));
- mp = (struct dlil_demux_desc *)nd->nd_buf;
- TAILQ_FOREACH(mp1, &np->nd_dlist, next)
- { if (k-- == 0)
- break;
- error = copyout(mp1, mp++, sizeof (struct dlil_demux_desc));
- if (error)
- break;
- }
- if (error == 0)
- { nd->nd_len = i * (sizeof (struct dlil_demux_desc));
- error = sooptcopyout(sopt, nd, sizeof (*nd));
- }
- return(error);
+ndrv_setspec(struct ndrv_cb *np, struct sockopt *sopt)
+{
+ struct dlil_proto_reg_str dlilSpec;
+ struct ndrv_protocol_desc ndrvSpec;
+ struct dlil_demux_desc* dlilDemux = NULL;
+ struct ndrv_demux_desc* ndrvDemux = NULL;
+ int error = 0;
+
+ /* Sanity checking */
+ if (np->nd_tag)
+ return EBUSY;
+ if (np->nd_if == NULL)
+ return EINVAL;
+ if (sopt->sopt_valsize != sizeof(struct ndrv_protocol_desc))
+ return EINVAL;
+
+ /* Copy the ndrvSpec */
+ error = sooptcopyin(sopt, &ndrvSpec, sizeof(struct ndrv_protocol_desc),
+ sizeof(struct ndrv_protocol_desc));
+ if (error != 0)
+ return error;
+
+ /* Verify the parameter */
+ if (ndrvSpec.version > NDRV_PROTOCOL_DESC_VERS)
+ return ENOTSUP; // version is too new!
+ else if (ndrvSpec.version < 1)
+ return EINVAL; // version is not valid
+
+ /* Allocate storage for demux array */
+ MALLOC(ndrvDemux, struct ndrv_demux_desc*,
+ ndrvSpec.demux_count * sizeof(struct ndrv_demux_desc), M_TEMP, M_WAITOK);
+ if (ndrvDemux == NULL)
+ return ENOMEM;
+
+ /* Allocate enough dlil_demux_descs */
+ MALLOC(dlilDemux, struct dlil_demux_desc*,
+ sizeof(*dlilDemux) * ndrvSpec.demux_count, M_TEMP, M_WAITOK);
+ if (dlilDemux == NULL)
+ error = ENOMEM;
+
+ if (error == 0)
+ {
+ /* Copy the ndrv demux array from userland */
+ error = copyin(ndrvSpec.demux_list, ndrvDemux,
+ ndrvSpec.demux_count * sizeof(struct ndrv_demux_desc));
+ ndrvSpec.demux_list = ndrvDemux;
+ }
+
+ if (error == 0)
+ {
+ /* At this point, we've at least got enough bytes to start looking around */
+ u_long demuxOn = 0;
+
+ bzero(&dlilSpec, sizeof(dlilSpec));
+ TAILQ_INIT(&dlilSpec.demux_desc_head);
+ dlilSpec.interface_family = np->nd_family;
+ dlilSpec.unit_number = np->nd_unit;
+ dlilSpec.input = ndrv_input;
+ dlilSpec.protocol_family = ndrvSpec.protocol_family;
+
+ for (demuxOn = 0; demuxOn < ndrvSpec.demux_count; demuxOn++)
+ {
+ /* Convert an ndrv_demux_desc to a dlil_demux_desc */
+ error = ndrv_to_dlil_demux(&ndrvSpec.demux_list[demuxOn], &dlilDemux[demuxOn]);
+ if (error)
+ break;
+
+ /* Add the dlil_demux_desc to the list */
+ TAILQ_INSERT_TAIL(&dlilSpec.demux_desc_head, &dlilDemux[demuxOn], next);
+ }
+ }
+
+ if (error == 0)
+ {
+ /* We've got all our ducks lined up...lets attach! */
+ error = dlil_attach_protocol(&dlilSpec, &np->nd_tag);
+ }
+
+ /* Free any memory we've allocated */
+ if (dlilDemux)
+ FREE(dlilDemux, M_TEMP);
+ if (ndrvDemux)
+ FREE(ndrvDemux, M_TEMP);
+
+ return error;
}
-/*
- * Install a protocol descriptor, making us a protocol handler.
- * We expect the client to handle all output tasks (we get fully
- * formed frames from the client and hand them to the driver
- * directly). The reason we register is to get those incoming
- * frames. We do it as a protocol handler because the network layer
- * already knows how find the ones we want, so there's no need to
- * duplicate effort.
- * Since this mechanism is mostly for user mode, most of the procedures
- * to be registered will be null.
- * Note that we jam the pair (PF_XXX, native_type) into the native_type
- * field of the demux descriptor. Yeah, it's a hack.
- */
-int
-ndrv_setspec(struct ndrv_cb *np, struct ndrv_descr *nd)
-{ struct dlil_demux_desc *mp, *mp1;
- int i = 0, error = 0, j;
- unsigned long value;
- int *native_values;
- struct dlil_proto_reg_str proto_spec;
- int ndrv_add_descr(struct ndrv_cb *, struct dlil_proto_reg_str *);
-
- bzero((caddr_t)&proto_spec, sizeof (proto_spec));
- i = nd->nd_len / (sizeof (struct dlil_demux_desc)); /* # elts */
- MALLOC(native_values,int *, i * sizeof (int), M_TEMP, M_WAITOK);
- if (native_values == NULL)
- return (ENOMEM);
- mp = (struct dlil_demux_desc *)nd->nd_buf;
- for (j = 0; j++ < i;)
- { MALLOC(mp1, struct dlil_demux_desc *,
- sizeof (struct dlil_demux_desc), M_PCB, M_WAITOK);
- if (mp1 == NULL)
- { error = ENOBUFS;
- break;
- }
- error = copyin(mp++, mp1, sizeof (struct dlil_demux_desc));
- if (error)
- break;
- TAILQ_INSERT_TAIL(&np->nd_dlist, mp1, next);
- value = (unsigned long)mp1->native_type;
- native_values[j] = (unsigned short)value;
- mp1->native_type = (char *)&native_values[j];
- proto_spec.protocol_family = (unsigned char)(value>>16); /* Oy! */
- proto_spec.interface_family = np->nd_if->if_family;
- proto_spec.unit_number = np->nd_if->if_unit;
- /* Our input */
- proto_spec.input = ndrv_input;
- proto_spec.pre_output = NULL;
- /* No event/offer functionality needed */
- proto_spec.event = NULL;
- proto_spec.offer = NULL;
- proto_spec.ioctl = ndrv_ioctl; /* ??? */
- /* What exactly does this do again? */
- proto_spec.default_proto = 0;
-
- np->nd_descrcnt++;
- }
- if (error)
- { struct dlil_demux_desc *mp2;
- while ((mp2 = TAILQ_FIRST(&np->nd_dlist))) {
- TAILQ_REMOVE(&np->nd_dlist, mp2, next);
- FREE(mp2, M_PCB);
- }
- } else
- error = ndrv_add_descr(np, &proto_spec);
-#ifdef NDRV_DEBUG
- log(LOG_WARNING, "NDRV ADDSPEC: got error %d\n", error);
-#endif
- FREE(native_values, M_TEMP);
- return(error);
+int
+ndrv_to_dlil_demux(struct ndrv_demux_desc* ndrv, struct dlil_demux_desc* dlil)
+{
+ bzero(dlil, sizeof(*dlil));
+
+ if (ndrv->type < DLIL_DESC_ETYPE2)
+ {
+ /* using old "type", not supported */
+ return ENOTSUP;
+ }
+
+ if (ndrv->length > 28)
+ {
+ return EINVAL;
+ }
+
+ dlil->type = ndrv->type;
+ dlil->native_type = ndrv->data.other;
+ dlil->variants.native_type_length = ndrv->length;
+
+ return 0;
}
int
-ndrv_delspec(struct ndrv_cb *np, struct ndrv_descr *nd)
-{ struct dlil_demux_desc *mp;
-
- return(EINVAL);
+ndrv_delspec(struct ndrv_cb *np)
+{
+ int result = 0;
+
+ if (np->nd_tag == 0)
+ return EINVAL;
+
+ /* Detach the protocol */
+ result = dlil_detach_protocol(np->nd_tag);
+ if (result == 0)
+ {
+ np->nd_tag = 0;
+ }
+
+ return result;
}
struct ndrv_cb *
ndrv_find_tag(unsigned int tag)
-{ struct ndrv_tag_map *tmp;
- int i;
-
- tmp = ndrv_tags;
- for (i=0; i++ < tag_map_count; tmp++)
- if (tmp->tm_tag == tag)
- return(tmp->tm_np);
- return(NULL);
-}
-
-int
-ndrv_add_tag(struct ndrv_cb *np, unsigned int tag,
- struct dlil_demux_desc *mp)
-{ struct ndrv_tag_map *tmp;
+{
+ struct ndrv_cb* np;
int i;
-
- tmp = ndrv_tags;
- for (i=0; i++ < tag_map_count; tmp++)
- if (tmp->tm_tag == 0)
- { tmp->tm_tag = tag;
- tmp->tm_np = np;
-#ifdef NDRV_DEBUG
- log(LOG_WARNING, "NDRV ADDING TAG %d\n", tag);
-#endif
- return(0);
- }
-
- /* Oops - ran out of space. Realloc */
- i = tag_map_count + TAG_MAP_COUNT;
- MALLOC(tmp, struct ndrv_tag_map *, i * sizeof (struct ndrv_tag_map),
- M_PCB, M_WAITOK);
- if (tmp == NULL)
- return(ENOMEM);
- /* Clear tail of new table, except for the slot we are creating ... */
- bzero((caddr_t)&tmp[tag_map_count+1],
- (TAG_MAP_COUNT-1) * sizeof (struct ndrv_tag_map));
- /* ...and then copy in the original piece */
- if (tag_map_count)
- bcopy(ndrv_tags, tmp,
- tag_map_count * sizeof (struct ndrv_tag_map));
- /* ...and then install the new tag... */
- tmp[tag_map_count].tm_tag = tag;
- tmp[tag_map_count].tm_np = np;
- tag_map_count = i;
- if (tag_map_count)
- FREE(ndrv_tags, M_PCB);
- ndrv_tags = tmp;
-#ifdef NDRV_DEBUG
- log(LOG_WARNING, "NDRV ADDING TAG %d (new chunk)\n", tag);
-#endif
- return(0);
+
+ if (tag == 0)
+ return NULL;
+
+ for (np = ndrvl.nd_next; np != NULL; np = np->nd_next)
+ {
+ if (np->nd_tag == tag)
+ {
+ return np;
+ }
+ }
+
+ return NULL;
}
-/*
- * Attach the proto spec list, and record the tags.
- */
-int
-ndrv_add_descr(struct ndrv_cb *np, struct dlil_proto_reg_str *proto_spec)
-{ unsigned long dl_tag;
- int error;
- struct dlil_demux_desc *mp;
-
- /* Attach to our device to get requested packets */
- TAILQ_INIT(&proto_spec->demux_desc_head);
- error = dlil_attach_protocol(proto_spec, &dl_tag);
-
- if (error == 0)
- error = ndrv_add_tag(np, dl_tag, mp);
+void ndrv_dominit()
+{
+ static int ndrv_dominited = 0;
- return(error);
+ if (ndrv_dominited == 0 &&
+ net_add_proto(&ndrvsw, &ndrvdomain) == 0)
+ ndrv_dominited = 1;
}
-int
-ndrv_dump_descr(struct ndrv_cb *np)
-{ struct dlil_demux_desc *dm1, *dm2;
- struct ndrv_tag_map *tmp;
- int i, error = 0;
-
- if (dm1 = TAILQ_FIRST(&np->nd_dlist))
- { for (i = 0, tmp = &ndrv_tags[0]; i++ < tag_map_count; tmp++)
- if (tmp->tm_np == np)
- { error = dlil_detach_protocol(tmp->tm_tag);
- while (dm1)
- { dm2 = TAILQ_NEXT(dm1, next);
- FREE(dm1, M_PCB);
- dm1 = dm2;
- }
- log(LOG_WARNING,
- "Detached tag %d (error %d)\n",
- tmp->tm_tag, error);
- tmp->tm_np = 0;
- tmp->tm_tag = 0;
- }
- }
- return(0);
+void
+ndrv_read_event(struct socket* so, caddr_t ref, int waitf)
+{
+ // Read an event
+ struct mbuf *m = NULL;
+ struct kern_event_msg *msg;
+ struct uio auio = {0};
+ int result = 0;
+ int flags = 0;
+
+ // Get the data
+ auio.uio_resid = 1000000; // large number to get all of the data
+ flags = MSG_DONTWAIT;
+ result = soreceive(so, (struct sockaddr**)NULL, &auio, &m,
+ (struct mbuf**)NULL, &flags);
+ if (result != 0 || m == NULL)
+ return;
+
+ // cast the mbuf to a kern_event_msg
+ // this is dangerous, doesn't handle linked mbufs
+ msg = mtod(m, struct kern_event_msg*);
+
+    // check for detaches, assume event filtering is working
+ if (msg->event_code == KEV_DL_IF_DETACHING ||
+ msg->event_code == KEV_DL_IF_DETACHED)
+ {
+ struct net_event_data *ev_data;
+ ev_data = (struct net_event_data*)msg->event_data;
+ ndrv_handle_ifp_detach(ev_data->if_family, ev_data->if_unit);
+ }
+
+ m_free(m);
}
-void ndrv_dominit()
+void
+ndrv_handle_ifp_detach(u_long family, short unit)
{
- static int ndrv_dominited = 0;
-
- if (ndrv_dominited == 0) {
- net_add_proto(&ndrvsw[0], &ndrvdomain);
-
- ndrv_dominited = 1;
+ struct ndrv_cb* np;
+ u_long dl_tag;
+
+ /* Find all sockets using this interface. */
+ for (np = ndrvl.nd_next; np != &ndrvl; np = np->nd_next)
+ {
+ if (np->nd_family == family &&
+ np->nd_unit == unit)
+ {
+ /* This cb is using the detaching interface, but not for long. */
+ /* Let the protocol go */
+ if (np->nd_tag != 0)
+ ndrv_delspec(np);
+
+ /* Disavow all knowledge of the ifp */
+ np->nd_if = NULL;
+ np->nd_unit = 0;
+ np->nd_family = 0;
+ np->nd_send_tag = 0;
+
+ /* Make sure sending returns an error */
+ /* Is this safe? Will we drop the funnel? */
+ socantsendmore(np->nd_socket);
+ socantrcvmore(np->nd_socket);
}
+ }
+
+ /* Unregister our protocol */
+ if (dlil_find_dltag(family, unit, PF_NDRV, &dl_tag) == 0) {
+ dlil_detach_protocol(dl_tag);
+ }
}
struct pr_usrreqs ndrv_usrreqs = {
ndrv_sockaddr, sosend, soreceive, sopoll
};
-struct protosw ndrvsw[] =
-{ { SOCK_RAW, &ndrvdomain, 0, PR_ATOMIC|PR_ADDR,
- 0, ndrv_output, ndrv_ctlinput, ndrv_ctloutput,
- 0, ndrv_init, 0, 0,
- ndrv_drain, ndrv_sysctl, &ndrv_usrreqs
- }
+struct protosw ndrvsw =
+{ SOCK_RAW, &ndrvdomain, NDRVPROTO_NDRV, PR_ATOMIC|PR_ADDR,
+ 0, ndrv_output, ndrv_ctlinput, ndrv_ctloutput,
+ 0, ndrv_init, 0, 0,
+ ndrv_drain, ndrv_sysctl, &ndrv_usrreqs
};
struct domain ndrvdomain =
* @(#)ndrv.h 1.1 (MacOSX) 6/10/43
* Justin Walker - 970604
*/
+#include <net/dlil.h>
#ifndef _NET_NDRV_H
#define _NET_NDRV_H
+
struct sockaddr_ndrv
-{ unsigned char snd_len;
+{
+ unsigned char snd_len;
unsigned char snd_family;
unsigned char snd_name[IFNAMSIZ]; /* from if.h */
};
/*
- * The cb is plugged into the socket (so_pcb), and the ifnet structure
- * of BIND is plugged in here.
- * For now, it looks like a raw_cb up front...
+ * Support for user-mode protocol handlers
*/
-struct ndrv_cb
-{ struct ndrv_cb *nd_next; /* Doubly-linked list */
- struct ndrv_cb *nd_prev;
- struct socket *nd_socket; /* Back to the socket */
- unsigned int nd_signature; /* Just double-checking */
- struct sockaddr_ndrv *nd_faddr;
- struct sockaddr_ndrv *nd_laddr;
- struct sockproto nd_proto; /* proto family, protocol */
- int nd_descrcnt; /* # elements in nd_dlist */
- TAILQ_HEAD(dlist, dlil_demux_desc) nd_dlist; /* Descr. list */
- struct ifnet *nd_if;
-};
-#define sotondrvcb(so) ((struct ndrv_cb *)(so)->so_pcb)
-#define NDRV_SIGNATURE 0x4e445256 /* "NDRV" */
+#define NDRV_DEMUXTYPE_ETHERTYPE DLIL_DESC_ETYPE2
+#define NDRV_DEMUXTYPE_SAP DLIL_DESC_SAP
+#define NDRV_DEMUXTYPE_SNAP DLIL_DESC_SNAP
-/* Nominal allocated space for NDRV sockets */
-#define NDRVSNDQ 8192
-#define NDRVRCVQ 8192
+#define NDRVPROTO_NDRV 0
/*
- * Support for user-mode protocol handlers
+ * Struct: ndrv_demux_desc
+ * Purpose:
+ * To uniquely identify a packet based on its low-level framing information.
+ *
+ * Fields:
+ * type : type of protocol in data field, must be understood by
+ * the interface family of the interface the socket is bound to
+ * length : length of protocol data in "data" field
+ * data : union of framing-specific data, in network byte order
+ * ether_type : ethernet type in network byte order, assuming
+ * ethernet type II framing
+ * sap : first 3 bytes of sap header, network byte order
+ * snap : first 5 bytes of snap header, network byte order
+ * other : up to 28 bytes of protocol data for different protocol type
+ *
+ * Examples:
+ * 1) 802.1x uses ether_type 0x888e, so the descriptor would be set as:
+ * struct ndrv_demux_desc desc;
+ * desc.type = NDRV_DEMUXTYPE_ETHERTYPE
+ * desc.length = sizeof(unsigned short);
+ * desc.ether_type = htons(0x888e);
+ * 2) AppleTalk uses SNAP 0x080007809B
+ * struct ndrv_demux_desc desc;
+ * desc.type = NDRV_DEMUXTYPE_SNAP;
+ * desc.length = 5;
+ * desc.data.snap[0] = 08;
+ * desc.data.snap[1] = 00;
+ * desc.data.snap[2] = 07;
+ * desc.data.snap[3] = 80;
+ * desc.data.snap[4] = 9B;
*/
+struct ndrv_demux_desc
+{
+ u_int16_t type;
+ u_int16_t length;
+ union
+ {
+ u_int16_t ether_type;
+ u_int8_t sap[3];
+ u_int8_t snap[5];
+ u_int8_t other[28];
+ } data;
+};
+
+#define NDRV_PROTOCOL_DESC_VERS 1
-/* Arg to socket options */
-struct ndrv_descr
-{ unsigned int nd_len; /* Length of descriptor buffer, in bytes */
- unsigned char *nd_buf; /* Descriptor buffer */
+/*
+ * Struct: ndrv_protocol_desc
+ * Purpose:
+ * Used to "bind" an NDRV socket so that packets that match
+ * given protocol demux descriptions can be received:
+ * Field:
+ * version : must be NDRV_PROTOCOL_DESC_VERS
+ * protocol_family : unique identifier for this protocol
+ * demux_count : number of demux_list descriptors in demux_list
+ * demux_list : pointer to array of demux descriptors
+ */
+struct ndrv_protocol_desc
+{
+ u_int32_t version;
+ u_int32_t protocol_family;
+ u_int32_t demux_count;
+ struct ndrv_demux_desc* demux_list;
};
-#define NDRV_DMXSPEC 0x01 /* Get/Set (Add) a list of protocol specs */
-#define NDRV_DELDMXSPEC 0x02 /* Delete a list of protocol specs */
-#define NDRV_DMXSPECCNT 0x03 /* Return number of active protocol specs */
+#define SOL_NDRVPROTO NDRVPROTO_NDRV /* Use this socket level */
+/* NDRV_DMXSPEC 0x01 Obsolete */
+#define NDRV_DELDMXSPEC 0x02 /* Delete the registered protocol */
+/* NDRV_DMXSPECCNT 0x03 Obsolete */
+#define NDRV_SETDMXSPEC 0x04 /* Set the protocol spec */
#if KERNEL
-extern struct ndrv_cb ndrvl; /* Head of controlblock list */
+/* Additional Kernel APIs */
+struct ifnet* ndrv_get_ifp(caddr_t ndrv_pcb);
#endif
+
#endif /* _NET_NDRV_H */
--- /dev/null
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */
+/*
+ * @(#)ndrv.h 1.1 (MacOSX) 6/10/43
+ * Justin Walker - 970604
+ */
+
+#ifndef _NET_NDRV_VAR_H
+#define _NET_NDRV_VAR_H
+
+/*
+ * The cb is plugged into the socket (so_pcb), and the ifnet structure
+ * of BIND is plugged in here.
+ * For now, it looks like a raw_cb up front...
+ */
+struct ndrv_cb
+{
+ struct ndrv_cb *nd_next; /* Doubly-linked list */
+ struct ndrv_cb *nd_prev;
+ struct socket *nd_socket; /* Back to the socket */
+ unsigned int nd_signature; /* Just double-checking */
+ struct sockaddr_ndrv *nd_faddr;
+ struct sockaddr_ndrv *nd_laddr;
+ struct sockproto nd_proto; /* proto family, protocol */
+ int nd_descrcnt; /* # elements in nd_dlist - Obsolete */
+ TAILQ_HEAD(dlist, dlil_demux_desc) nd_dlist; /* Descr. list */
+ struct ifnet *nd_if;
+ u_long nd_send_tag;
+ u_long nd_tag;
+ u_long nd_family;
+ short nd_unit;
+};
+
+#define sotondrvcb(so) ((struct ndrv_cb *)(so)->so_pcb)
+#define NDRV_SIGNATURE 0x4e445256 /* "NDRV" */
+
+/* Nominal allocated space for NDRV sockets */
+#define NDRVSNDQ 8192
+#define NDRVRCVQ 8192
+
+#if KERNEL
+extern struct ndrv_cb ndrvl; /* Head of controlblock list */
+#endif
+#endif /* _NET_NDRV_VAR_H */
csum = in_cksum_skip(m, ip->ip_len, offset);
- if (csum == 0)
+ if ((m->m_pkthdr.csum_flags & CSUM_UDP) && csum == 0)
csum = 0xffff;
offset += m->m_pkthdr.csum_data & 0xFFFF; /* checksum offset */
/* Allocate a new vnode/inode. */
type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
- if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
- FREE_ZONE(ip, sizeof(struct inode), type);
- *vpp = NULL;
- return (error);
- }
bzero((caddr_t)ip, sizeof(struct inode));
lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
- vp->v_data = ip;
- ip->i_vnode = vp;
+ /* lock the inode */
+ lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
+
ip->i_fs = fs = ump->um_fs;
ip->i_dev = dev;
ip->i_number = ino;
+ ip->i_flag |= IN_ALLOC;
#if QUOTA
for (i = 0; i < MAXQUOTAS; i++)
ip->i_dquot[i] = NODQUOT;
#endif
+
/*
- * Put it onto its hash chain and lock it so that other requests for
+ * MALLOC_ZONE is blocking call. Check for race.
+ */
+ if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
+ /* Clean up */
+ FREE_ZONE(ip, sizeof(struct inode), type);
+ vp = *vpp;
+ UBCINFOCHECK("ffs_vget", vp);
+ return (0);
+ }
+
+ /*
+ * Put it onto its hash chain locked so that other requests for
* this inode will block if they arrive while we are sleeping waiting
* for old data structures to be purged or for the contents of the
* disk portion of this inode to be read.
*/
ufs_ihashins(ip);
+ if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
+ ufs_ihashrem(ip);
+ if (ISSET(ip->i_flag, IN_WALLOC))
+ wakeup(ip);
+ FREE_ZONE(ip, sizeof(struct inode), type);
+ *vpp = NULL;
+ return (error);
+ }
+ vp->v_data = ip;
+ ip->i_vnode = vp;
+
+ /*
+ * A vnode is associated with the inode now,
+ * vget() can deal with the serialization.
+ */
+ CLR(ip->i_flag, IN_ALLOC);
+ if (ISSET(ip->i_flag, IN_WALLOC))
+ wakeup(ip);
+
/* Read in the disk contents for the inode, copy into the inode. */
if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
(int)fs->fs_bsize, NOCRED, &bp)) {
#define IN_EXLOCK 0x0040 /* File has exclusive lock. */
#define IN_TRANSIT 0x0080 /* inode is getting recycled */
#define IN_WTRANSIT 0x0100 /* waiting for inode getting recycled */
+#define IN_ALLOC 0x0200 /* being allocated */
+#define IN_WALLOC 0x0400 /* waiting for allocation to be done */
#ifdef KERNEL
/*
for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
- if (ip->i_flag & IN_TRANSIT) {
- /* inode is getting reclaimed wait till
+ if (ISSET(ip->i_flag, IN_ALLOC)) {
+ /*
+ * inode is being created. Wait for it
+ * to finish creation
+ */
+ SET(ip->i_flag, IN_WALLOC);
+ simple_unlock(&ufs_ihash_slock);
+ (void)tsleep((caddr_t)ip, PINOD, "ufs_ihashget", 0);
+ goto loop;
+ }
+
+ if (ISSET(ip->i_flag, IN_TRANSIT)) {
+ /*
+ * inode is getting reclaimed wait till
* the operation is complete and return
* error
*/
- ip->i_flag |= IN_WTRANSIT;
+ SET(ip->i_flag, IN_WTRANSIT);
simple_unlock(&ufs_ihash_slock);
- tsleep((caddr_t)ip, PINOD, "ufs_ihashget", 0);
+ (void)tsleep((caddr_t)ip, PINOD, "ufs_ihashget1", 0);
goto loop;
}
simple_lock(&vp->v_interlock);
}
/*
-* Insert the inode into the hash table, and return it locked.
+ * Insert the inode into the hash table,
+ * inode is assumed to be locked by the caller
*/
void
ufs_ihashins(ip)
struct inode *ip;
{
- struct proc *p = current_proc(); /* XXX */
+ struct proc *p = current_proc();
struct ihashhead *ipp;
- /* lock the inode, then put it on the appropriate hash list */
- lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);
-
simple_lock(&ufs_ihash_slock);
ipp = INOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
* marking inode in transit so that one can get this
* inode from inodecache
*/
- ip->i_flag |= IN_TRANSIT;
+ SET(ip->i_flag, IN_TRANSIT);
error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, p);
ip->i_rdev = 0;
mode = ip->i_mode;
}
}
#endif
+ CLR(ip->i_flag, (IN_ALLOC|IN_TRANSIT));
+ if (ISSET(ip->i_flag, IN_WALLOC)|| ISSET(ip->i_flag, IN_WTRANSIT))
+ wakeup(ip);
+
return (0);
}
if (error == 0) {
/*
* The cluster_io write completed successfully,
- * update the uio structure and commit.
+ * update the uio structure.
*/
-
- ubc_upl_commit_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
- UPL_COMMIT_FREE_ON_EMPTY);
-
iov->iov_base += io_size;
iov->iov_len -= io_size;
uio->uio_resid -= io_size;
uio->uio_offset += io_size;
}
- else {
- ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
- UPL_ABORT_FREE_ON_EMPTY);
- }
+ /*
+ * always 'commit' the I/O via the abort primitive whether the I/O
+ * succeeded cleanly or not... this is necessary to insure that
+ * we preserve the state of the DIRTY flag on the pages used to
+ * provide the data for the I/O... the state of this flag SHOULD
+ * NOT be changed by a write
+ */
+ ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
+ UPL_ABORT_FREE_ON_EMPTY);
+
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
(int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
struct vnode *vp;
struct mount *mp;
struct vfsconf *vfsp;
- int error, flag;
+ int error, flag, err2;
struct vattr va;
u_long fstypenum;
struct nameidata nd;
vfs_unbusy(mp, p);
return (error);
}
+
+ /* get the vnode lock */
+ err2 = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p);
+
/*
* Put the new filesystem on the mount list after root.
*/
cache_purge(vp);
- if (!error) {
+ if (!error && !err2) {
simple_lock(&vp->v_interlock);
CLR(vp->v_flag, VMOUNT);
vp->v_mountedhere =mp;
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
_FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT);
- vput(vp);
+ if (err2)
+ vrele(vp);
+ else
+ vput(vp);
}
return (error);
}
if (
((p = pfind(pid)) != (struct proc *) 0)
&& (p1 != (struct proc *) 0)
- && ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
+ && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
+ ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
|| !(suser(p1->p_ucred, &p1->p_acflag)))
&& (p->p_stat != SZOMB)
) {
// type to the client or application being notified.
//*********************************************************************************
-void IOPMrootDomain::tellChangeUp ( unsigned long )
+void IOPMrootDomain::tellChangeUp ( unsigned long stateNum)
{
- return tellClients(kIOMessageSystemHasPoweredOn);
+ if ( stateNum == ON_STATE ) {
+ return tellClients(kIOMessageSystemHasPoweredOn);
+ }
}
//*********************************************************************************
#define _IOKIT_IOSHAREDLOCKIMP_H
#include <architecture/ppc/asm_help.h>
+#ifdef KERNEL
+#undef END
+#include <mach/ppc/asm.h>
+#endif
-// 'Till we're building in kernel
.macro DISABLE_PREEMPTION
#ifdef KERNEL
+ stwu r1,-(FM_SIZE)(r1)
+ mflr r0
+ stw r3,FM_ARG0(r1)
+ stw r0,(FM_SIZE+FM_LR_SAVE)(r1)
+ bl EXT(_disable_preemption)
+ lwz r3,FM_ARG0(r1)
+ lwz r1,0(r1)
+ lwz r0,FM_LR_SAVE(r1)
+ mtlr r0
#endif
.endmacro
.macro ENABLE_PREEMPTION
#ifdef KERNEL
+ stwu r1,-(FM_SIZE)(r1)
+ mflr r0
+ stw r3,FM_ARG0(r1)
+ stw r0,(FM_SIZE+FM_LR_SAVE)(r1)
+ bl EXT(_enable_preemption)
+ lwz r3,FM_ARG0(r1)
+ lwz r1,0(r1)
+ lwz r0,FM_LR_SAVE(r1)
+ mtlr r0
#endif
.endmacro
res = thread_block(0);
- if (THREAD_AWAKENED == res) {
- mutex_lock(lock->mutex);
- assert(lock->thread == 0);
- assert(lock->count == 0);
- lock->thread = IOThreadSelf();
- lock->count = count;
- }
+ // Must re-establish the recursive lock no matter why we woke up
+ // otherwise we would potentially leave the return path corrupted.
+ mutex_lock(lock->mutex);
+ assert(lock->thread == 0);
+ assert(lock->count == 0);
+ lock->thread = IOThreadSelf();
+ lock->count = count;
return res;
}
if ( priv->head_note_state < pm_vars->myCurrentState ) { // dropping power?
priv->machine_state = IOPMour_prechange_03; // yes, in case we have to wait for acks
pm_vars->doNotPowerDown = false;
- if ( askChangeDown(priv->head_note_state) ) { // ask apps and kernel clients if we can drop power
+ pm_vars->outofbandparameter = kNotifyApps; // ask apps and kernel clients if we can drop power
+ if ( askChangeDown(priv->head_note_state) ) {
if ( pm_vars->doNotPowerDown ) { // don't have to wait, did any clients veto?
tellNoChangeDown(priv->head_note_state); // yes, rescind the warning
priv-> head_note_flags |= IOPMNotDone; // mark the change note un-actioned
*/
const char * gIOKernelKmods =
"{
- 'com.apple.kernel' = '5.1';
- 'com.apple.kernel.bsd' = '5.1';
- 'com.apple.kernel.iokit' = '5.1';
- 'com.apple.kernel.libkern' = '5.1';
- 'com.apple.kernel.mach' = '5.1';
+ 'com.apple.kernel' = '5.2';
+ 'com.apple.kernel.bsd' = '5.2';
+ 'com.apple.kernel.iokit' = '5.2';
+ 'com.apple.kernel.libkern' = '5.2';
+ 'com.apple.kernel.mach' = '5.2';
'com.apple.iokit.IOADBFamily' = '1.1';
'com.apple.iokit.IOSystemManagementFamily' = '1.1';
}";
if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
+ if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
+
+ if(signal == SIGPwake) return KERN_SUCCESS; /* SIGPwake can merge into all others... */
+
+ if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
+ return KERN_SUCCESS; /* Don't bother to send this one... */
+ }
+ }
+
if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
- (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) { /* Try to lock the message block */
+ (gPEClockFrequencyInfo.bus_clock_rate_hz >> 13))) { /* Try to lock the message block with a .5ms timeout */
return KERN_FAILURE; /* Timed out, take your ball and go home... */
}
#define pfWillNapb 20
#define pfNoMSRir 0x00000400
#define pfNoMSRirb 21
-#define pfL1nnc 0x00000200
-#define pfL1nncb 22
+#define pfL3pdet 0x00000200
+#define pfL3pdetb 22
#define pfL1i 0x00000100
#define pfL1ib 23
#define pfL1d 0x00000080
DECLARE("pfThermalb", pfThermalb);
DECLARE("pfThermInt", pfThermInt);
DECLARE("pfThermIntb", pfThermIntb);
- DECLARE("pfLClck", pfLClck);
- DECLARE("pfLClckb", pfLClckb);
DECLARE("pfWillNap", pfWillNap);
DECLARE("pfWillNapb", pfWillNapb);
DECLARE("pfNoMSRir", pfNoMSRir);
DECLARE("pfNoMSRirb", pfNoMSRirb);
- DECLARE("pfL1nnc", pfL1nnc);
- DECLARE("pfL1nncb", pfL1nncb);
+ DECLARE("pfLClck", pfLClck);
+ DECLARE("pfLClckb", pfLClckb);
+ DECLARE("pfL3pdet", pfL3pdet);
+ DECLARE("pfL3pdetb", pfL3pdetb);
DECLARE("pfL1i", pfL1i);
DECLARE("pfL1ib", pfL1ib);
DECLARE("pfL1d", pfL1d);
stw r8,napStamp(r12) ; Set high order time stamp
stw r7,napStamp+4(r12) ; Set low order nap stamp
- bf pfL1nncb,minoflushl1 ; The L1 is coherent in nap/doze...
-;
-; 7450 does not keep L1 cache coherent across nap/sleep it must alwasy flush.
-; It does not have a L1 flush assist, so we do not test for it here.
-;
-; Note that the time stamp take above is not completely accurate for 7450
-; because we are about to flush the L1 cache and that takes a bit of time.
-;
- cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache
- bf- cr0_eq,minoflushl1 ; No level 1 to flush...
- rlwinm. r0,r4,0,ice,dce ; Were either of the level 1s on?
- beq- minoflushl1 ; No, no need to flush...
-
-miswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size
- rlwinm r2,r0,0,1,30 ; Double it
- add r0,r0,r2 ; Get 3 times cache size
- rlwinm r2,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Turn off data translation
- rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines
- lis r3,0xFFF0 ; Dead recon ROM address for now
- mtctr r0 ; Number of lines to flush
- mtmsr r2 ; Do it
- isync
-
-miswfldl1a: lwz r2,0(r3) ; Flush anything else
- addi r3,r3,32 ; Next line
- bdnz miswfldl1a ; Flush the lot...
-
-miinvdl1: sync ; Make sure all flushes have been committed
- mtmsr r5 ; Put back data translation
- isync
-
- mfspr r8,hid0 ; Get the HID0 bits
- li r7,lo16(icem|dcem) ; Get the cache enable bits
- andc r8,r8,r7 ; Clear cache enables
- mtspr hid0,r8 ; and turn off L1 cache
- sync ; Make sure all is done
-
- ori r8,r8,lo16(icfim|dcfim) ; Set the HID0 bits for invalidate
- sync
- isync
-
- mtspr hid0,r8 ; Start the invalidate
- sync
-
-minoflushl1:
-
;
; We have to open up interruptions here because book 4 says that we should
; turn on only the POW bit and that we should have interrupts enabled
bne+ ciinvdl3b ; Assume so...
sync
+ bf pfL3pdetb, ciinvdl3nopdet
mfspr r3,l3pdet ; ?
rlwimi r3,r3,28,0,23 ; ?
oris r3,r3,0xF000 ; ?
mtspr l3pdet,r3 ; ?
isync
+ciinvdl3nopdet:
mfspr r3,l3cr ; Get the L3CR
rlwinm r3,r3,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
mtspr l3cr,r3 ; Disable the clock
; 7450
init7450: bf firstBoot,i7450nb ; Do different if not initial boot...
- oris r17,r17,hi16(pfAvJava) ; Show that we do Java mode in non-open source version
mfspr r13,l2cr ; Get the L2CR
rlwinm. r0,r13,0,l2e,l2e ; Any L2?
; Take care of level 3 cache
mfspr r13,l3cr ; Get the L3CR
- rlwinm. r0,r13,0,l3e,l3e ; Any L2?
+ rlwinm. r0,r13,0,l3e,l3e ; Any L3?
bne+ i7450hl3 ; Yes...
- rlwinm r17,r17,0,pfL3b+1,pfL3b-1 ; No L2, turn off feature
+ rlwinm r17,r17,0,pfL3b+1,pfL3b-1 ; No L3, turn off feature
i7450hl3: cmplwi cr0,r13,0 ; No L3 if L3CR is zero
beq- init7450none ; Go turn off the features...
.long 0xFFFFFF00 ; Just revisions 1.xx
.short PROCESSOR_VERSION_7450
.short 0x0100
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1nnc | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105
.long 0xFFFFFFFF ; Just revision 2.0
.short PROCESSOR_VERSION_7450
.short 0x0200
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105
.long 0xFFFF0000 ; All other revisions
.short PROCESSOR_VERSION_7450
.short 0
- .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfWillNap | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa
+ .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfWillNap | pfNoMSRir | pfLClck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa | pfL3pdet
.long init7450
.long CPU_SUBTYPE_POWERPC_7450
.long 105