simple_unlock(&mntvnode_slock);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
- if (error == ENOENT)
+ if (error == ENOENT) {
+ /*
+ * If vnode is being reclaimed, yield so
+ * that it can be removed from our list.
+ */
+ if (UBCISVALID(vp))
+ (void) tsleep((caddr_t)&lbolt, PINOD, "hfs_sync", 0);
goto loop;
+ }
simple_lock(&mntvnode_slock);
continue;
}
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
int ifqmaxlen = IFQ_MAXLEN;
-struct ifnethead ifnet; /* depend on static init XXX */
+struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet);
struct ifmultihead ifma_lostlist = LIST_HEAD_INITIALIZER(ifma_lostlist);
#if INET6
* parameters.
*/
-int if_index = 0;
+int if_index;
struct ifaddr **ifnet_addrs;
-struct ifnet **ifindex2ifnet = NULL;
+struct ifnet **ifindex2ifnet;
+#define INITIAL_IF_INDEXLIM 8
+
+/*
+ * Function: if_next_index
+ * Purpose:
+ * Return the next available interface index.
+ * Grow the ifnet_addrs[] and ifindex2ifnet[] arrays to accommodate the
+ * added entry when necessary.
+ *
+ * Note:
+ * ifnet_addrs[] is indexed by (if_index - 1), whereas
+ * ifindex2ifnet[] is indexed by ifp->if_index. That requires us to
+ * always allocate one extra element to hold ifindex2ifnet[0], which
+ * is unused.
+ */
+static int
+if_next_index(void)
+{
+ static int if_indexlim = 0;
+ static int if_list_growing = 0;
+ int new_index;
+
+ while (if_list_growing) {
+ /* wait until list is done growing */
+ (void)tsleep((caddr_t)&ifnet_addrs, PZERO, "if_next_index", 0);
+ }
+ new_index = ++if_index;
+ if (if_index > if_indexlim) {
+ unsigned n;
+ int new_if_indexlim;
+ caddr_t new_ifnet_addrs;
+ caddr_t new_ifindex2ifnet;
+ caddr_t old_ifnet_addrs;
+
+ /* mark list as growing */
+ if_list_growing = 1;
+
+ old_ifnet_addrs = (caddr_t)ifnet_addrs;
+ if (ifnet_addrs == NULL) {
+ new_if_indexlim = INITIAL_IF_INDEXLIM;
+ } else {
+ new_if_indexlim = if_indexlim << 1;
+ }
+
+ /* allocate space for the larger arrays */
+ n = (2 * new_if_indexlim + 1) * sizeof(caddr_t);
+ new_ifnet_addrs = _MALLOC(n, M_IFADDR, M_WAITOK);
+ new_ifindex2ifnet = new_ifnet_addrs
+ + new_if_indexlim * sizeof(caddr_t);
+ bzero(new_ifnet_addrs, n);
+ if (ifnet_addrs != NULL) {
+ /* copy the existing data */
+ bcopy((caddr_t)ifnet_addrs, new_ifnet_addrs,
+ if_indexlim * sizeof(caddr_t));
+ bcopy((caddr_t)ifindex2ifnet,
+ new_ifindex2ifnet,
+ (if_indexlim + 1) * sizeof(caddr_t));
+ }
+
+ /* switch to the new tables and size */
+ ifnet_addrs = (struct ifaddr **)new_ifnet_addrs;
+ ifindex2ifnet = (struct ifnet **)new_ifindex2ifnet;
+ if_indexlim = new_if_indexlim;
+
+ /* release the old data */
+ if (old_ifnet_addrs != NULL) {
+ _FREE((caddr_t)old_ifnet_addrs, M_IFADDR);
+ }
+
+ /* wake up others that might be blocked */
+ if_list_growing = 0;
+ wakeup((caddr_t)&ifnet_addrs);
+ }
+ return (new_index);
+
+}
/*
* Attach an interface to the
char workbuf[64];
register struct sockaddr_dl *sdl;
register struct ifaddr *ifa;
- static int if_indexlim = 8;
- static int inited;
if (ifp->if_snd.ifq_maxlen == 0)
ifp->if_snd.ifq_maxlen = ifqmaxlen;
- if (!inited) {
- TAILQ_INIT(&ifnet);
- inited = 1;
- }
-
- TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
- /* if the interface is recycled, keep the index */
- if (!((ifp->if_eflags & IFEF_REUSE) && ifp->if_index))
- ifp->if_index = ++if_index;
/*
* XXX -
* The old code would work if the interface passed a pre-existing
TAILQ_INIT(&ifp->if_prefixhead);
LIST_INIT(&ifp->if_multiaddrs);
getmicrotime(&ifp->if_lastchange);
- if (ifnet_addrs == 0 || if_index >= if_indexlim) {
- unsigned n = (if_indexlim <<= 1) * sizeof(ifa);
- struct ifaddr **q = (struct ifaddr **)
- _MALLOC(n, M_IFADDR, M_WAITOK);
- bzero((caddr_t)q, n);
- if (ifnet_addrs) {
- bcopy((caddr_t)ifnet_addrs, (caddr_t)q, n/2);
- FREE((caddr_t)ifnet_addrs, M_IFADDR);
- }
- ifnet_addrs = (struct ifaddr **)q;
-
- /* grow ifindex2ifnet */
- n = if_indexlim * sizeof(struct ifaddr *);
- q = (struct ifaddr **)_MALLOC(n, M_IFADDR, M_WAITOK);
- bzero(q, n);
- if (ifindex2ifnet) {
- bcopy((caddr_t)ifindex2ifnet, q, n/2);
- _FREE((caddr_t)ifindex2ifnet, M_IFADDR);
- }
- ifindex2ifnet = (struct ifnet **)q;
- }
- ifindex2ifnet[if_index] = ifp;
+ if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
+ /* allocate a new entry */
+ ifp->if_index = if_next_index();
+ ifindex2ifnet[ifp->if_index] = ifp;
- /*
- * create a Link Level name for this device
- */
- namelen = snprintf(workbuf, sizeof(workbuf),
- "%s%d", ifp->if_name, ifp->if_unit);
+ /*
+ * create a Link Level name for this device
+ */
+ namelen = snprintf(workbuf, sizeof(workbuf),
+ "%s%d", ifp->if_name, ifp->if_unit);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
- masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
- socksize = masklen + ifp->if_addrlen;
+ masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
+ socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
- if (socksize < sizeof(*sdl))
- socksize = sizeof(*sdl);
- socksize = ROUNDUP(socksize);
- ifasize = sizeof(*ifa) + 2 * socksize;
- ifa = (struct ifaddr *) _MALLOC(ifasize, M_IFADDR, M_WAITOK);
- if (ifa) {
- bzero((caddr_t)ifa, ifasize);
- sdl = (struct sockaddr_dl *)(ifa + 1);
- sdl->sdl_len = socksize;
- sdl->sdl_family = AF_LINK;
- bcopy(workbuf, sdl->sdl_data, namelen);
- sdl->sdl_nlen = namelen;
- sdl->sdl_index = ifp->if_index;
- sdl->sdl_type = ifp->if_type;
- ifnet_addrs[if_index - 1] = ifa;
- ifa->ifa_ifp = ifp;
- ifa->ifa_rtrequest = link_rtrequest;
- ifa->ifa_addr = (struct sockaddr *)sdl;
- sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
- ifa->ifa_netmask = (struct sockaddr *)sdl;
- sdl->sdl_len = masklen;
- while (namelen != 0)
- sdl->sdl_data[--namelen] = 0xff;
+ if (socksize < sizeof(*sdl))
+ socksize = sizeof(*sdl);
+ socksize = ROUNDUP(socksize);
+ ifasize = sizeof(*ifa) + 2 * socksize;
+ ifa = (struct ifaddr *) _MALLOC(ifasize, M_IFADDR, M_WAITOK);
+ if (ifa) {
+ bzero((caddr_t)ifa, ifasize);
+ sdl = (struct sockaddr_dl *)(ifa + 1);
+ sdl->sdl_len = socksize;
+ sdl->sdl_family = AF_LINK;
+ bcopy(workbuf, sdl->sdl_data, namelen);
+ sdl->sdl_nlen = namelen;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = ifp->if_type;
+ ifnet_addrs[ifp->if_index - 1] = ifa;
+ ifa->ifa_ifp = ifp;
+ ifa->ifa_rtrequest = link_rtrequest;
+ ifa->ifa_addr = (struct sockaddr *)sdl;
+ sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
+ ifa->ifa_netmask = (struct sockaddr *)sdl;
+ sdl->sdl_len = masklen;
+ while (namelen != 0)
+ sdl->sdl_data[--namelen] = 0xff;
+ }
+ } else {
+ ifa = ifnet_addrs[ifp->if_index - 1];
+ }
+ if (ifa != NULL) {
TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
}
+ TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
}
__private_extern__ int
struct inpcbinfo *ipi = inp->inp_pcbinfo;
struct rtentry *rt = inp->inp_route.ro_rt;
+
+ if (so->so_pcb == 0) /* we've been called twice, ignore */
+ return;
+
#if IPSEC
ipsec4_delete_pcbpolicy(inp);
#endif /*IPSEC*/
int errno;
{
if (inp->inp_route.ro_rt) {
+ if (ifa_foraddr(inp->inp_laddr.s_addr) == NULL)
+ return; /* the local address is gone, so we can't safely free the route now; it is also unclear whether the source address is still usable */
rtfree(inp->inp_route.ro_rt);
inp->inp_route.ro_rt = 0;
/*
* cache with IPv6.
*/
+ if (ro->ro_rt && (ro->ro_rt->generation_id != route_generation) &&
+ ((flags & (IP_ROUTETOIF | IP_FORWARDING)) == 0) && (ip->ip_src.s_addr != INADDR_ANY) &&
+ (ifa_foraddr(ip->ip_src.s_addr) == NULL)) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 ||
dst->sin_family != AF_INET ||
- dst->sin_addr.s_addr != ip->ip_dst.s_addr ||
- ro->ro_rt->generation_id != route_generation) ) {
+ dst->sin_addr.s_addr != ip->ip_dst.s_addr)) {
rtfree(ro->ro_rt);
ro->ro_rt = (struct rtentry *)0;
}
* come back before the TCP connection times out).
*/
- if (tp->t_inpcb->inp_route.ro_rt != NULL &&
- (tp->t_inpcb->inp_route.ro_rt->generation_id != route_generation)) {
+ if ((tp->t_inpcb->inp_route.ro_rt != NULL &&
+ (tp->t_inpcb->inp_route.ro_rt->generation_id != route_generation)) || (tp->t_inpcb->inp_route.ro_rt == NULL)) {
/* check that the source address is still valid */
if (ifa_foraddr(tp->t_inpcb->inp_laddr.s_addr) == NULL) {
if (tp->t_state >= TCPS_CLOSE_WAIT) {
else
return(0); /* silently ignore and keep data in socket */
}
- else { /* Clear the cached route, will be reacquired later */
- rtfree(tp->t_inpcb->inp_route.ro_rt);
- tp->t_inpcb->inp_route.ro_rt = (struct rtentry *)0;
- }
}
}
sendalot = 0;
register struct rtentry *rt;
int dosavessthresh;
+ if ( inp->inp_ppcb == NULL) /* tcp_close was called previously, bail */
+ return;
+
#ifndef __APPLE__
/*
* Make sure that all of our timers are stopped before we
}
}
#endif
+
KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0);
switch (tp->t_state)
#endif
ipnxt = ip->inp_list.le_next;
tp = intotcpcb(ip);
+ if (tp == NULL) { /* tp already closed, remove from list */
+ LIST_REMOVE(ip, inp_list);
+ continue;
+ }
if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
tp->t_rcvtime += N_TIME_WAIT_SLOTS;
#endif
}
+extern size_t nd_ifinfo_indexlim;
+extern int ip6_use_tempaddr;
void
in6_tmpaddrtimer(ignored_arg)
void *ignored_arg;
(ip6_temp_preferred_lifetime - ip6_desync_factor -
ip6_temp_regen_advance) * hz);
- bzero(nullbuf, sizeof(nullbuf));
- for (i = 1; i < if_index + 1; i++) {
- ndi = &nd_ifinfo[i];
- if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) != 0) {
- /*
- * We've been generating a random ID on this interface.
- * Create a new one.
- */
- (void)generate_tmp_ifid(ndi->randomseed0,
- ndi->randomseed1,
- ndi->randomid);
+ if (ip6_use_tempaddr) {
+
+ bzero(nullbuf, sizeof(nullbuf));
+ for (i = 1; i < nd_ifinfo_indexlim + 1; i++) {
+ ndi = &nd_ifinfo[i];
+ if (ndi->flags != ND6_IFF_PERFORMNUD)
+ continue;
+ if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) != 0) {
+ /*
+ * We've been generating a random ID on this interface.
+ * Create a new one.
+ */
+ (void)generate_tmp_ifid(ndi->randomseed0,
+ ndi->randomseed1,
+ ndi->randomid);
+ }
}
}
-
splx(s);
}
static int nd6_inuse, nd6_allocated;
struct llinfo_nd6 llinfo_nd6 = {&llinfo_nd6, &llinfo_nd6};
-static size_t nd_ifinfo_indexlim = 8;
+size_t nd_ifinfo_indexlim = 8;
struct nd_ifinfo *nd_ifinfo = NULL;
struct nd_drhead nd_defrouter;
struct nd_prhead nd_prefix = { 0 };
* Then force a getattr rpc to ensure that you have up to date
* attributes.
* NB: This implies that cache data can be read when up to
- * NFS_MAXATTRTIMEO seconds out of date. If you find that you need current
- * attributes this could be forced by setting n_attrstamp to 0 before
- * the VOP_GETATTR() call.
+ * NFS_MAXATTRTIMEO seconds out of date. If you find that you need
+ * current attributes this could be forced by setting n_xid to 0
+ * before the VOP_GETATTR() call.
*/
if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) {
if (np->n_flag & NMODIFIED) {
return (error);
}
}
- np->n_attrstamp = 0;
+ np->n_xid = 0;
error = VOP_GETATTR(vp, &vattr, cred, p);
if (error) {
FSDBG_BOT(514, vp, 0xd1e0004, 0, error);
(void)nfs_fsinfo(nmp, vp, cred, p);
if (ioflag & (IO_APPEND | IO_SYNC)) {
if (np->n_flag & NMODIFIED) {
- np->n_attrstamp = 0;
+ np->n_xid = 0;
error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
if (error) {
FSDBG_BOT(515, vp, uio->uio_offset, 0x10bad01, error);
}
}
if (ioflag & IO_APPEND) {
- np->n_attrstamp = 0;
+ np->n_xid = 0;
error = VOP_GETATTR(vp, &vattr, cred, p);
if (error) {
FSDBG_BOT(515, vp, uio->uio_offset, 0x10bad02, error);
/* need to flush, and refetch attributes to make */
/* sure we have the correct end of file offset */
if (np->n_flag & NMODIFIED) {
- np->n_attrstamp = 0;
+ np->n_xid = 0;
error = nfs_vinvalbuf(vp, V_SAVE, p->p_ucred, p, 1);
if (error) {
vrele(wvp);
return (error);
}
}
- np->n_attrstamp = 0;
+ np->n_xid = 0;
error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
if (error) {
vrele(wvp);
* to indicate the attributes were dropped - only getattr
* cares - it needs to retry the rpc.
*/
- np->n_attrstamp = 0;
+ np->n_xid = 0;
FSDBG_BOT(527, 0, np, np->n_xid, *xidp);
*xidp = 0;
return (0);
if (!UBCINFOEXISTS(vp) ||
dontshrink && np->n_size < ubc_getsize(vp)) {
vap->va_size = np->n_size = orig_size;
- np->n_attrstamp = 0;
+ np->n_xid = 0;
} else {
ubc_setsize(vp, (off_t)np->n_size); /* XXX */
}
struct timeval now, nowup;
int32_t timeo;
+ if (np->n_xid == 0) {
+ FSDBG(528, vp, 0, 0, 0);
+ nfsstats.attrcache_misses++;
+ return (ENOENT);
+ }
+
/* Set attribute timeout based on how recently the file has been modified. */
if ((np)->n_flag & NMODIFIED)
timeo = NFS_MINATTRTIMO;
if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
ap->a_p, 1)) == EINTR)
return (error);
- np->n_attrstamp = 0;
+ np->n_xid = 0;
if (vp->v_type == VDIR)
np->n_direofoffset = 0;
error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
}
}
if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
- np->n_attrstamp = 0; /* For Open/Close consistency */
+ np->n_xid = 0; /* For Open/Close consistency */
return (0);
}
} else {
error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
}
- np->n_attrstamp = 0;
+ np->n_xid = 0;
if (getlock)
VOP_UNLOCK(vp, 0, ap->a_p);
}
if (v3) {
nfsm_wcc_data(vp, wccflag, &xid);
if (!wccflag)
- VTONFS(vp)->n_attrstamp = 0;
+ VTONFS(vp)->n_xid = 0;
} else
nfsm_loadattr(vp, (struct vattr *)0, &xid);
nfsm_reqdone;
}
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
vput(dvp);
NFS_FREE_PNBUF(cnp);
return (error);
}
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
vput(dvp);
NFS_FREE_PNBUF(cnp);
return (error);
} else if (!np->n_sillyrename) {
error = nfs_sillyrename(dvp, vp, cnp);
}
- np->n_attrstamp = 0;
+ np->n_xid = 0;
vput(dvp);
VOP_UNLOCK(vp, 0, cnp->cn_proc);
nfsm_reqdone;
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
return (error);
}
nfsm_reqdone;
VTONFS(fdvp)->n_flag |= NMODIFIED;
if (!fwccflag)
- VTONFS(fdvp)->n_attrstamp = 0;
+ VTONFS(fdvp)->n_xid = 0;
VTONFS(tdvp)->n_flag |= NMODIFIED;
if (!twccflag)
- VTONFS(tdvp)->n_attrstamp = 0;
+ VTONFS(tdvp)->n_xid = 0;
return (error);
}
VTONFS(tdvp)->n_flag |= NMODIFIED;
if (!attrflag)
- VTONFS(vp)->n_attrstamp = 0;
+ VTONFS(vp)->n_xid = 0;
if (!wccflag)
- VTONFS(tdvp)->n_attrstamp = 0;
+ VTONFS(tdvp)->n_xid = 0;
if (didhold)
ubc_rele(vp);
vput(tdvp);
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
vput(dvp);
NFS_FREE_PNBUF(cnp);
/*
nfsm_reqdone;
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
/*
* Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
* if we can succeed in looking up the directory.
nfsm_reqdone;
VTONFS(dvp)->n_flag |= NMODIFIED;
if (!wccflag)
- VTONFS(dvp)->n_attrstamp = 0;
+ VTONFS(dvp)->n_xid = 0;
cache_purge(dvp);
cache_purge(vp);
vput(vp);
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Resource Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>System Resource Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.driver.AppleNMI</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.ApplePlatformFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>BSDKernel</string>
<key>CFBundleGetInfoString</key>
- <string>BSD Kernel Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>BSD Kernel Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.bsd</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleExecutable</key>
<string>IOKit</string>
<key>CFBundleGetInfoString</key>
- <string>I/O Kit Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>I/O Kit Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.iokit</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IONVRAMFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
<string>1.1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Management Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>System Management Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IOSystemManagementFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>Libkern</string>
<key>CFBundleGetInfoString</key>
- <string>Libkern Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>Libkern Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.libkern</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleExecutable</key>
<string>Mach</string>
<key>CFBundleGetInfoString</key>
- <string>Mach Kernel Pseudoextension, Apple Computer Inc, 7.0</string>
+ <string>Mach Kernel Pseudoextension, Apple Computer Inc, 7.2</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.mach</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.0</string>
+ <string>7.2</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
*/
const char * gIOKernelKmods =
"{"
-" 'com.apple.kernel' = '7.0';"
-" 'com.apple.kpi.bsd' = '7.0';"
-" 'com.apple.kpi.iokit' = '7.0';"
-" 'com.apple.kpi.libkern' = '7.0';"
-" 'com.apple.kpi.mach' = '7.0';"
-" 'com.apple.iokit.IONVRAMFamily' = '7.0';"
-" 'com.apple.driver.AppleNMI' = '7.0';"
-" 'com.apple.iokit.IOSystemManagementFamily' = '7.0';"
-" 'com.apple.iokit.ApplePlatformFamily' = '7.0';"
+" 'com.apple.kernel' = '7.2';"
+" 'com.apple.kpi.bsd' = '7.2';"
+" 'com.apple.kpi.iokit' = '7.2';"
+" 'com.apple.kpi.libkern' = '7.2';"
+" 'com.apple.kpi.mach' = '7.2';"
+" 'com.apple.iokit.IONVRAMFamily' = '7.2';"
+" 'com.apple.driver.AppleNMI' = '7.2';"
+" 'com.apple.iokit.IOSystemManagementFamily' = '7.2';"
+" 'com.apple.iokit.ApplePlatformFamily' = '7.2';"
" 'com.apple.kernel.6.0' = '6.9.9';"
" 'com.apple.kernel.bsd' = '6.9.9';"
" 'com.apple.kernel.iokit' = '6.9.9';"
mfmsr r5 /* Since we are passing control, get our MSR values */
lwz r11,SAVprev+4(r3) /* Get the previous savearea */
lwz r1,saver1+4(r3) /* Load new stack pointer */
+ lwz r10,ACT_MACT_SPF(r9) /* Get the special flags */
stw r0,saver3+4(r3) /* Make sure we pass in a 0 for the continuation */
stw r0,FM_BACKPTR(r1) /* zero backptr */
stw r5,savesrr1+4(r3) /* Pass our MSR to the new guy */
stw r11,ACT_MACT_PCB(r9) /* Unstack our savearea */
+ oris r10,r10,hi16(OnProc) /* Set OnProc bit */
stw r0,ACT_PREEMPT_CNT(r9) /* Enable preemption */
+ stw r10,ACT_MACT_SPF(r9) /* Update the special flags */
+ stw r10,spcFlags(r6) /* Set per_proc copy of the special flags */
b EXT(exception_exit) /* Go for it */
/* thread_t Switch_context(thread_t old,
DECLARE("FamVMmodebit", FamVMmodebit);
DECLARE("perfMonitor", perfMonitor);
DECLARE("perfMonitorbit", perfMonitorbit);
+ DECLARE("OnProc", OnProc);
+ DECLARE("OnProcbit", OnProcbit);
/* Per Proc info structure */
DECLARE("PP_CPU_NUMBER", offsetof(struct per_proc_info *, cpu_number));
lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
stw r8,SAVprev(r22) ; Link the old in (top)
stw r9,SAVprev+4(r22) ; Link the old in (bottom)
- xor r3,r24,r3 ; Convert to physical
+ xor r3,r22,r3 ; Convert to physical
stw r2,quickfret(r31) ; Set the first in quickfret list (top)
stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
#if !MACH_LDEBUG
mfsprg r6,1 ; load the current thread
lwz r5,0(r3) ; Get the lock quickly
+ li r4,0
+ li r8,0
mr. r5,r5 ; Quick check
- bne-- L_mutex_lock_slow ; Can not get it right now...
+ bne-- mlckspin1 ; Can not get it right now...
-L_mutex_lock_loop:
+mlcktry:
lwarx r5,0,r3 ; load the mutex lock
mr. r5,r5
- bne-- L_mutex_lock_slowX ; go to the slow path
+ bne-- mlckspin0 ; Can not get it right now...
stwcx. r6,0,r3 ; grab the lock
- bne-- L_mutex_lock_loop ; loop back if failed
+ bne-- mlcktry ; loop back if failed
isync ; stop prefeteching
+ mflr r8
+ stw r8,4(r3)
blr
-L_mutex_lock_slowX:
+mlckspin0:
li r5,lgKillResv ; Killing field
stwcx. r5,0,r5 ; Kill reservation
+mlckspin1:
+ mr. r4,r4 ; Test timeout value
+ bne++ mlckspin2
+ lis r4,hi16(EXT(MutexSpin)) ; Get the high part
+ ori r4,r4,lo16(EXT(MutexSpin) ) ; And the low part
+ lwz r4,0(r4) ; Get spin timeout value
+ mr. r4,r4 ; Test spin timeout value
+ beq mlckslow1 ; Is spin timeout set to zero
+
+mlckspin2: mr. r8,r8 ; Is r8 set to zero
+ bne++ mlckspin3 ; If yes, first spin attempt
+ lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
+ mfmsr r9 ; Get the MSR value
+ ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
+ ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
+ andc r9,r9,r0 ; Clear FP and VEC
+ andc r7,r9,r7 ; Clear EE as well
+ mtmsr r7 ; Turn off interruptions
+ isync ; May have turned off vec and fp here
+ mftb r8 ; Get timestamp on entry
+ b mlcksniff
+
+mlckspin3: mtmsr r7 ; Turn off interruptions
+ mftb r8 ; Get timestamp on entry
+
+mlcksniff: lwz r5,0(r3) ; Get that lock in here
+ mr. r5,r5 ; Is the lock held
+ beq++ mlckretry ; No, try for it again...
+ rlwinm r5,r5,0,0,29 ; Extract the lock owner
+ mr. r5,r5 ; Quick check
+ beq++ mlckslow0 ; InterLock is held
+ lwz r10,ACT_MACT_SPF(r5) ; Get the special flags
+ rlwinm. r10,r10,0,OnProcbit,OnProcbit ; Is OnProcbit set?
+ beq mlckslow0 ; Lock owner isn't running
+
+ mftb r10 ; Time stamp us now
+ sub r10,r10,r8 ; Get the elapsed time
+ cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
+ blt++ mlcksniff ; Not yet...
+
+ mtmsr r9 ; Say, any interrupts pending?
+
+; The following instructions force the pipeline to be interlocked so that only one
+; instruction is issued per cycle. This ensures that we stay enabled for a long enough
+; time; if it's too short, pending interruptions will not have a chance to be taken
+
+ subi r4,r4,128 ; Back off elapsed time from timeout value
+ or r4,r4,r4 ; Do nothing here but force a single cycle delay
+ mr. r4,r4 ; See if we used the whole timeout
+ or r4,r4,r4 ; Do nothing here but force a single cycle delay
+
+ ble-- mlckslow1 ; We failed
+ b mlckspin1 ; Now that we've opened an enable window, keep trying...
+mlckretry:
+ mtmsr r9 ; Restore interrupt state
+ li r8,1 ; Show already through once
+ b mlcktry
+mlckslow0: ; We couldn't get the lock
+ mtmsr r9 ; Restore interrupt state
-L_mutex_lock_slow:
+mlckslow1:
#endif
#if CHECKNMI
mflr r12 ; (TEST/DEBUG)
hpmGotOne: lwz r20,mpFlags(r3) ; Get the flags
andi. r9,r20,lo16(mpSpecial|mpNest|mpPerm|mpBlock) ; Are we allowed to remove it?
+ rlwinm r21,r20,8,24,31 ; Extract the busy count
+ cmplwi cr2,r21,0 ; Is it busy?
+ crand cr0_eq,cr2_eq,cr0_eq ; not busy and can be removed?
beq++ hrmGotX ; Found, branch to remove the mapping...
b hpmCNext ; Nope...
#include <kern/processor.h>
unsigned int max_cpus_initialized = 0;
+unsigned int LockTimeOut = 12500000;
+unsigned int MutexSpin = 0;
extern int forcenap;
#define MAX_CPUS_SET 0x1
return -1;
}
+void
+ml_init_lock_timeout(void)
+{
+ uint64_t abstime;
+ uint32_t mtxspin;
+
+ nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
+ LockTimeOut = (unsigned int)abstime;
+
+ if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
+ if (mtxspin > USEC_PER_SEC>>4)
+ mtxspin = USEC_PER_SEC>>4;
+ nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
+ } else {
+ nanoseconds_to_absolutetime(20*NSEC_PER_USEC, &abstime);
+ }
+ MutexSpin = (unsigned int)abstime;
+}
+
void
init_ast_check(processor_t processor)
{}
unsigned int ml_throttle(
unsigned int);
+void ml_init_lock_timeout(void);
+
#endif /* MACH_KERNEL_PRIVATE */
void ml_thread_policy(
; So, make sure everything we need there is already set up...
;
- li r10,hi16(dozem|napm|sleepm) ; Mask of power management bits
+ lis r10,hi16(dozem|napm|sleepm) ; Mask of power management bits
bf-- pf64Bitb,mipNSF1 ; skip if 32-bit...
old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
+ old_act->mact.specFlags &= ~OnProc;
+ new_act->mact.specFlags |= OnProc;
/*
* We do not have to worry about the PMAP module, so switch.
old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
+ old->top_act->mact.specFlags &= ~OnProc;
+ new->top_act->mact.specFlags |= OnProc;
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
old->reason, (int)new, old->sched_pri, new->sched_pri, 0);
#include <machine/mach_param.h> /* HZ */
#include <machine/commpage.h>
+#include <machine/machine_routines.h>
#include <ppc/proc_reg.h>
#include <pexpert/pexpert.h>
nanoseconds_to_absolutetime(NSEC_PER_HZ, &abstime);
rtclock_tick_interval = abstime;
+
+ ml_init_lock_timeout();
}
else {
UNLOCK_RTC(s);
#define FamVMenabit 11
#define FamVMmodebit 12
#define perfMonitorbit 13
+#define OnProcbit 14
/* NOTE: Do not move or assign bit 31 without changing exception vector ultra fast path code */
#define bbThreadbit 28
#define bbNoMachSCbit 29
#define FamVMena 0x00100000 /* (1<<(31-FamVMenabit)) */
#define FamVMmode 0x00080000 /* (1<<(31-FamVMmodebit)) */
#define perfMonitor 0x00040000 /* (1<<(31-perfMonitorbit)) */
+#define OnProc 0x00020000 /* (1<<(31-OnProcbit)) */
#define bbThread 0x00000008 /* (1<<(31-bbThreadbit)) */
#define bbNoMachSC 0x00000004 /* (1<<(31-bbNoMachSCbit)) */
vm_pageout_throttle(
register vm_page_t m)
{
+ register vm_object_t object;
+
+ /*
+ * need to keep track of the object we
+ * started with... if we drop the object lock
+ * due to the throttle, it's possible that someone
+ * else will gather this page into an I/O if this
+ * is an external object... the page will then be
+ * potentially freed before we unwedge from the
+ * throttle... this is ok since no one plays with
+ * the page directly after the throttle... the object
+ * and offset are passed into the memory_object_data_return
+ * function where eventually it's relooked up against the
+ * object... if it's changed state or there is no longer
+ * a page at that offset, the pageout just finishes without
+ * issuing an I/O
+ */
+ object = m->object;
+
assert(!m->laundry);
m->laundry = TRUE;
- while (vm_page_laundry_count >= vm_page_laundry_max) {
+ if (!object->internal)
+ vm_page_burst_count++;
+ vm_page_laundry_count++;
+
+ while (vm_page_laundry_count > vm_page_laundry_max) {
/*
* Set the threshold for when vm_page_free()
* should wake us up.
assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
vm_page_unlock_queues();
- vm_object_unlock(m->object);
+ vm_object_unlock(object);
/*
* Pause to let the default pager catch up.
*/
thread_block((void (*)(void)) 0);
- vm_object_lock(m->object);
+ vm_object_lock(object);
vm_page_lock_queues();
}
- if (!m->object->internal)
- vm_page_burst_count++;
- vm_page_laundry_count++;
}
/*
#include <pexpert/ppc/powermac.h>
#include <pexpert/device_tree.h>
-/* External declarations */
-
-unsigned int LockTimeOut = 12500000;
-
/* pe_identify_machine:
*
* Sets up platform parameters.