return (0);
p = current_proc();
+ retry:
if (fp->ff_unallocblocks) {
lockExtBtree = 1;
if (fp->ff_unallocblocks) {
SInt64 reqbytes, actbytes;
+ //
+ // Make sure we have a transaction. It's possible
+ // that we came in and fp->ff_unallocblocks was zero
+ // but during the time we blocked acquiring the extents
+ // btree, ff_unallocblocks became non-zero and so we
+ // will need to start a transaction.
+ //
+ if (hfsmp->jnl && started_tr == 0) {
+ if (lockExtBtree) {
+ (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);
+ lockExtBtree = 0;
+ }
+
+ goto retry;
+ }
+
reqbytes = (SInt64)fp->ff_unallocblocks *
(SInt64)HFSTOVCB(hfsmp)->blockSize;
/*
extern struct sysctl_oid sysctl__net_inet_tcp_mssdflt;
extern struct sysctl_oid sysctl__net_inet_tcp_recvspace;
extern struct sysctl_oid sysctl__net_inet_tcp_sendspace;
+extern struct sysctl_oid sysctl__net_inet_tcp_slowlink_wsize;
extern struct sysctl_oid sysctl__net_inet_tcp_blackhole;
extern struct sysctl_oid sysctl__net_inet_tcp_tcp_lq_overflow;
extern struct sysctl_oid sysctl__net_inet_tcp_path_mtu_discovery;
,&sysctl__net_inet_tcp_mssdflt
,&sysctl__net_inet_tcp_recvspace
,&sysctl__net_inet_tcp_sendspace
+ ,&sysctl__net_inet_tcp_slowlink_wsize
,&sysctl__net_inet_tcp_blackhole
,&sysctl__net_inet_tcp_tcp_lq_overflow
,&sysctl__net_inet_tcp_path_mtu_discovery
struct radix_node_head *rnh;
- if (((rnh = rt_tables[protocol]) != NULL) && (ifp != NULL))
+ if ((protocol <= AF_MAX) && ((rnh = rt_tables[protocol]) != NULL) && (ifp != NULL))
(void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
}
#include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
#include <net/if.h>
+#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
&drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif
+__private_extern__ int slowlink_wsize = 8192;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowlink_wsize, CTLFLAG_RW,
+ &slowlink_wsize, 0, "Maximum advertised window size for slowlink");
+
+
u_long tcp_now;
struct inpcbhead tcb;
#define tcb6 tcb /* for KAME src sync over BSD*'s */
win = sbspace(&so->so_rcv);
if (win < 0)
win = 0;
+ else { /* clip rcv window to 4K for modems */
+ if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
+ win = min(win, slowlink_wsize);
+ }
tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
}
* here. Even if we requested window scaling, it will
* become effective only later when our SYN is acked.
*/
- tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN);
+	if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) /* clip window size for slow link */
+ tp->rcv_adv += min(tp->rcv_wnd, slowlink_wsize);
+ else
+ tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN);
tcpstat.tcps_connects++;
soisconnected(so);
tp->t_timer[TCPT_KEEP] = tcp_keepinit;
tp->snd_wnd -= acked;
ourfinisacked = 0;
}
- sowwakeup(so);
tp->snd_una = th->th_ack;
if (SEQ_LT(tp->snd_nxt, tp->snd_una))
tp->snd_nxt = tp->snd_una;
+ sowwakeup(so);
switch (tp->t_state) {
return;
}
ifp = rt->rt_ifp;
+	/*
+	 * Slower link window correction:
+	 * If a value is specified for slowlink_wsize, use it for PPP links
+	 * believed to be on a serial modem (speed <128Kbps). Excludes 9600bps as
+	 * it is the default value advertised by pseudo-devices over ppp.
+	 */
+ if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
+ ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
+ tp->t_flags |= TF_SLOWLINK;
+ }
so = inp->inp_socket;
taop = rmx_taop(rt->rt_rmx);
isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
tcp_mssdflt;
+	/*
+	 * Slower link window correction:
+	 * If a value is specified for slowlink_wsize, use it for PPP links
+	 * believed to be on a serial modem (speed <128Kbps). Excludes 9600bps as
+	 * it is the default value advertised by pseudo-devices over ppp.
+	 */
+ if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
+ rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
+ tp->t_flags |= TF_SLOWLINK;
+ }
return rt->rt_ifp->if_mtu - min_protoh;
}
SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW,
&ss_fltsz, 1, "Slow start flight size");
-int ss_fltsz_local = TCP_MAXWIN; /* something large */
+int ss_fltsz_local = 4; /* starts with four segments max */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW,
&ss_fltsz_local, 1, "Slow start flight size for local networks");
extern int ipsec_bypass;
#endif
+extern int slowlink_wsize; /* window correction for slow links */
+
/*
* Tcp output routine: figure out what should be sent and send it.
*/
sendalot = 0;
off = tp->snd_nxt - tp->snd_una;
win = min(tp->snd_wnd, tp->snd_cwnd);
+ if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
+ win = min(win, slowlink_wsize);
flags = tcp_outflags[tp->t_state];
/*
if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
flags &= ~TH_FIN;
- win = sbspace(&so->so_rcv);
+ if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0 ) /* Clips window size for slow links */
+ win = min(sbspace(&so->so_rcv), slowlink_wsize);
+ else
+ win = sbspace(&so->so_rcv);
/*
* Sender silly window avoidance. If connection is idle
win = 0;
if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
win = (long)(tp->rcv_adv - tp->rcv_nxt);
- if (win > (long)TCP_MAXWIN << tp->rcv_scale)
+ if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
+ if (win > (long)slowlink_wsize)
+ win = slowlink_wsize;
+ th->th_win = htons((u_short) (win>>tp->rcv_scale));
+ }
+ else {
+
+ if (win > (long)TCP_MAXWIN << tp->rcv_scale)
win = (long)TCP_MAXWIN << tp->rcv_scale;
- th->th_win = htons((u_short) (win>>tp->rcv_scale));
+ th->th_win = htons((u_short) (win>>tp->rcv_scale));
+ }
if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
th->th_flags |= TH_URG;
#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */
#define TF_LQ_OVERFLOW 0x20000 /* listen queue overflow */
#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */
+#define TF_SLOWLINK 0x80000 /* route is on a modem-speed link */
int t_force; /* 1 if forcing out a byte */
*/
if (
n->m_ext.ext_free ||
- mclrefcnt[mtocl(n->m_ext.ext_buf)] > 1
+ m_mclhasreference(n)
)
{
int remain, copied;
FSDBG(530, myrep->r_xid, myrep, nmp, error);
nfs_rcvunlock(&nmp->nm_flag);
+	/* Bail out ASAP if the nfsmount struct is gone (unmounted). */
+ if (!myrep->r_nmp || !nmp->nm_so)
+ return (ECONNABORTED);
+
/*
* Ignore routing errors on connectionless protocols??
*/
nfs_rcvlock(rep)
register struct nfsreq *rep;
{
- register int *flagp = &rep->r_nmp->nm_flag;
+ register int *flagp;
int slpflag, slptimeo = 0;
+ /* make sure we still have our mountpoint */
+ if (!rep->r_nmp) {
+ if (rep->r_mrep != NULL)
+ return (EALREADY);
+ return (ECONNABORTED);
+ }
+
+ flagp = &rep->r_nmp->nm_flag;
FSDBG_TOP(534, rep->r_xid, rep, rep->r_nmp, *flagp);
if (*flagp & NFSMNT_INT)
slpflag = PCATCH;
return (EINVAL);
}
mp = vp->v_mount;
- flag = mp->mnt_flag;
+
+ if (vfs_busy(mp, LK_NOWAIT, 0, p)) {
+ vput(vp);
+ return (EBUSY);
+ }
/*
* We only allow the filesystem to be reloaded if it
* is currently mounted read-only.
*/
if ((uap->flags & MNT_RELOAD) &&
((mp->mnt_flag & MNT_RDONLY) == 0)) {
+ vfs_unbusy(mp, p);
vput(vp);
return (EOPNOTSUPP); /* Needs translation */
}
- mp->mnt_flag |=
- uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
/*
* Only root, or the user that did the original mount is
* permitted to update it.
*/
if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
(error = suser(p->p_ucred, &p->p_acflag))) {
+ vfs_unbusy(mp, p);
vput(vp);
return (error);
}
*/
if (p->p_ucred->cr_uid != 0) {
if (uap->flags & MNT_EXPORTED) {
+ vfs_unbusy(mp, p);
vput(vp);
return (EPERM);
}
uap->flags |= MNT_NOSUID | MNT_NODEV;
- if (flag & MNT_NOEXEC)
+ if (mp->mnt_flag & MNT_NOEXEC)
uap->flags |= MNT_NOEXEC;
}
- if (vfs_busy(mp, LK_NOWAIT, 0, p)) {
- vput(vp);
- return (EBUSY);
- }
+ flag = mp->mnt_flag;
+
+ mp->mnt_flag |=
+ uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
+
VOP_UNLOCK(vp, 0, p);
+
goto update;
}
/*
* Mount the filesystem.
*/
error = VFS_MOUNT(mp, uap->path, uap->data, &nd, p);
- if (mp->mnt_flag & MNT_UPDATE) {
+
+ if (uap->flags & MNT_UPDATE) {
vrele(vp);
if (mp->mnt_kern_flag & MNTK_WANTRDWR)
mp->mnt_flag &= ~MNT_RDONLY;
vp->v_mountedhere =mp;
simple_unlock(&vp->v_interlock);
simple_lock(&mountlist_slock);
+
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
simple_unlock(&mountlist_slock);
checkdirs(vp);
/* increment the operations count */
if (!error)
vfs_nummntops++;
+
CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
coveredvp->v_mountedhere = (struct mount *)0;
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Resource Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>System Resource Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.driver.AppleNMI</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.ApplePlatformFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>BSD Kernel Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>BSD Kernel Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel.bsd</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>ADB Family Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>ADB Family Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IOADBFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>I/O Kit Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>I/O Kit Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel.iokit</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IONVRAMFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Management Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>System Management Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IOSystemManagementFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>Libkern Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>Libkern Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel.libkern</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>Mach Kernel Pseudoextension, Apple Computer Inc, 6.4</string>
+ <string>Mach Kernel Pseudoextension, Apple Computer Inc, 6.5</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel.mach</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>6.4</string>
+ <string>6.5</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
*/
const char * gIOKernelKmods =
"{
- 'com.apple.kernel' = '6.4';
- 'com.apple.kernel.bsd' = '6.4';
- 'com.apple.kernel.iokit' = '6.4';
- 'com.apple.kernel.libkern' = '6.4';
- 'com.apple.kernel.mach' = '6.4';
- 'com.apple.iokit.IOADBFamily' = '6.4';
- 'com.apple.iokit.IONVRAMFamily' = '6.4';
- 'com.apple.iokit.IOSystemManagementFamily' = '6.4';
- 'com.apple.iokit.ApplePlatformFamily' = '6.4';
- 'com.apple.driver.AppleNMI' = '6.4';
+ 'com.apple.kernel' = '6.5';
+ 'com.apple.kernel.bsd' = '6.5';
+ 'com.apple.kernel.iokit' = '6.5';
+ 'com.apple.kernel.libkern' = '6.5';
+ 'com.apple.kernel.mach' = '6.5';
+ 'com.apple.iokit.IOADBFamily' = '6.5';
+ 'com.apple.iokit.IONVRAMFamily' = '6.5';
+ 'com.apple.iokit.IOSystemManagementFamily' = '6.5';
+ 'com.apple.iokit.ApplePlatformFamily' = '6.5';
+ 'com.apple.driver.AppleNMI' = '6.5';
}";
-
/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
int ps_info_valid;
int page_list_count;
- if (cnt > VM_SUPER_CLUSTER)
+ if((vs_offset & cl_mask) &&
+ (cnt > (VM_SUPER_CLUSTER -
+ (vs_offset & cl_mask)))) {
+ size = VM_SUPER_CLUSTER;
+ size -= vs_offset & cl_mask;
+ } else if (cnt > VM_SUPER_CLUSTER) {
size = VM_SUPER_CLUSTER;
- else
+ } else {
size = cnt;
+ }
cnt -= size;
ps_info_valid = 0;
*/
for (xfer_size = 0; xfer_size < size; ) {
- while (cl_index < pages_in_cl && xfer_size < size) {
+ while (cl_index < pages_in_cl
+ && xfer_size < size) {
/*
- * accumulate allocated pages within a physical segment
+ * accumulate allocated pages within
+ * a physical segment
*/
if (CLMAP_ISSET(clmap, cl_index)) {
xfer_size += vm_page_size;
} else
break;
}
- if (cl_index < pages_in_cl || xfer_size >= size) {
+ if (cl_index < pages_in_cl
+ || xfer_size >= size) {
/*
- * we've hit an unallocated page or the
- * end of this request... go fire the I/O
+ * we've hit an unallocated page or
+ * the end of this request... go fire
+ * the I/O
*/
break;
}
/*
- * we've hit the end of the current physical segment
- * and there's more to do, so try moving to the next one
+ * we've hit the end of the current physical
+ * segment and there's more to do, so try
+ * moving to the next one
*/
seg_index++;
- ps_offset[seg_index] = ps_clmap(vs, cur_offset & ~cl_mask, &clmap, CL_FIND, 0, 0);
- psp[seg_index] = CLMAP_PS(clmap);
+ ps_offset[seg_index] =
+ ps_clmap(vs,
+ cur_offset & ~cl_mask,
+ &clmap, CL_FIND, 0, 0);
+ psp[seg_index] = CLMAP_PS(clmap);
ps_info_valid = 1;
if ((ps_offset[seg_index - 1] != (ps_offset[seg_index] - cl_size)) || (psp[seg_index - 1] != psp[seg_index])) {
/*
- * if the physical segment we're about to step into
- * is not contiguous to the one we're currently
- * in, or it's in a different paging file, or
+ * if the physical segment we're about
+ * to step into is not contiguous to
+ * the one we're currently in, or it's
+ * in a different paging file, or
* it hasn't been allocated....
* we stop here and generate the I/O
*/
break;
}
/*
- * start with first page of the next physical segment
+ * start with first page of the next physical
+ * segment
*/
cl_index = 0;
}
*/
page_list_count = 0;
memory_object_super_upl_request(vs->vs_control,
- (memory_object_offset_t)vs_offset,
- xfer_size, xfer_size,
- &upl, NULL, &page_list_count,
- request_flags | UPL_SET_INTERNAL);
+ (memory_object_offset_t)vs_offset,
+ xfer_size, xfer_size,
+ &upl, NULL, &page_list_count,
+ request_flags | UPL_SET_INTERNAL);
- error = ps_read_file(psp[beg_pseg], upl, (vm_offset_t) 0,
- ps_offset[beg_pseg] + (beg_indx * vm_page_size), xfer_size, &residual, 0);
+ error = ps_read_file(psp[beg_pseg],
+ upl, (vm_offset_t) 0,
+ ps_offset[beg_pseg] +
+ (beg_indx * vm_page_size),
+ xfer_size, &residual, 0);
} else
continue;
failed_size = 0;
/*
- * Adjust counts and send response to VM. Optimize for the
- * common case, i.e. no error and/or partial data.
- * If there was an error, then we need to error the entire
- * range, even if some data was successfully read.
- * If there was a partial read we may supply some
+ * Adjust counts and send response to VM. Optimize
+ * for the common case, i.e. no error and/or partial
+ * data. If there was an error, then we need to error
+ * the entire range, even if some data was successfully
+ * read. If there was a partial read we may supply some
* data and may error some as well. In all cases the
* VM must receive some notification for every page in the
* range.
*/
if ((error == KERN_SUCCESS) && (residual == 0)) {
/*
- * Got everything we asked for, supply the data to
- * the VM. Note that as a side effect of supplying
- * the data, the buffer holding the supplied data is
- * deallocated from the pager's address space.
+ * Got everything we asked for, supply the data
+ * to the VM. Note that as a side effect of
+		 * supplying the data, the buffer holding the
+		 * supplied data is deallocated from the pager's
+ * address space.
*/
- pvs_object_data_provided(vs, upl, vs_offset, xfer_size);
+ pvs_object_data_provided(
+ vs, upl, vs_offset, xfer_size);
} else {
failed_size = xfer_size;
if (error == KERN_SUCCESS) {
if (residual == xfer_size) {
- /*
- * If a read operation returns no error
- * and no data moved, we turn it into
- * an error, assuming we're reading at
- * or beyong EOF.
- * Fall through and error the entire
- * range.
- */
+ /*
+ * If a read operation returns no error
+ * and no data moved, we turn it into
+ * an error, assuming we're reading at
+				 * or beyond EOF.
+ * Fall through and error the entire
+ * range.
+ */
error = KERN_FAILURE;
} else {
- /*
- * Otherwise, we have partial read. If
- * the part read is a integral number
- * of pages supply it. Otherwise round
- * it up to a page boundary, zero fill
- * the unread part, and supply it.
- * Fall through and error the remainder
- * of the range, if any.
- */
+ /*
+ * Otherwise, we have partial read. If
+ * the part read is a integral number
+ * of pages supply it. Otherwise round
+ * it up to a page boundary, zero fill
+ * the unread part, and supply it.
+ * Fall through and error the remainder
+ * of the range, if any.
+ */
int fill, lsize;
- fill = residual & ~vm_page_size;
- lsize = (xfer_size - residual) + fill;
- pvs_object_data_provided(vs, upl, vs_offset, lsize);
+ fill = residual
+ & ~vm_page_size;
+ lsize = (xfer_size - residual)
+ + fill;
+ pvs_object_data_provided(
+ vs, upl,
+ vs_offset, lsize);
if (lsize < xfer_size) {
- failed_size = xfer_size - lsize;
+ failed_size =
+ xfer_size - lsize;
error = KERN_FAILURE;
}
}
}
/*
* If there was an error in any part of the range, tell
- * the VM. Note that error is explicitly checked again since
- * it can be modified above.
+ * the VM. Note that error is explicitly checked again
+ * since it can be modified above.
*/
if (error != KERN_SUCCESS) {
BS_STAT(psp[beg_pseg]->ps_bs,
- psp[beg_pseg]->ps_bs->bs_pages_in_fail += atop(failed_size));
+ psp[beg_pseg]->ps_bs->bs_pages_in_fail
+ += atop(failed_size));
}
size -= xfer_size;
vs_offset += xfer_size;
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
- for (seg_index = 0, transfer_size = upl->size; transfer_size > 0; ) {
+ for (seg_index = 0, transfer_size = upl->size;
+ transfer_size > 0; ) {
- ps_offset[seg_index] = ps_clmap(vs, upl->offset + (seg_index * cl_size),
- &clmap, CL_ALLOC,
- transfer_size < cl_size ?
- transfer_size : cl_size, 0);
+ ps_offset[seg_index] =
+ ps_clmap(vs, upl->offset + (seg_index * cl_size),
+ &clmap, CL_ALLOC,
+ transfer_size < cl_size ?
+ transfer_size : cl_size, 0);
if (ps_offset[seg_index] == (vm_offset_t) -1) {
upl_abort(upl, 0);
} else
transfer_size = 0;
}
- for (page_index = 0, num_of_pages = upl->size / vm_page_size; page_index < num_of_pages; ) {
+ for (page_index = 0,
+ num_of_pages = upl->size / vm_page_size;
+ page_index < num_of_pages; ) {
/*
* skip over non-dirty pages
*/
for ( ; page_index < num_of_pages; page_index++) {
- if (UPL_DIRTY_PAGE(pl, page_index) || UPL_PRECIOUS_PAGE(pl, page_index))
+ if (UPL_DIRTY_PAGE(pl, page_index)
+ || UPL_PRECIOUS_PAGE(pl, page_index))
/*
* this is a page we need to write
- * go see if we can buddy it up with others
- * that are contiguous to it
+ * go see if we can buddy it up with
+ * others that are contiguous to it
*/
break;
/*
- * if the page is not-dirty, but present we need to commit it...
- * this is an unusual case since we only asked for dirty pages
+ * if the page is not-dirty, but present we
+ * need to commit it... This is an unusual
+ * case since we only asked for dirty pages
*/
if (UPL_PAGE_PRESENT(pl, page_index)) {
boolean_t empty = FALSE;
break;
/*
- * gather up contiguous dirty pages... we have at least 1
- * otherwise we would have bailed above
+ * gather up contiguous dirty pages... we have at
+ * least 1 otherwise we would have bailed above
* make sure that each physical segment that we step
* into is contiguous to the one we're currently in
* if it's not, we have to stop and write what we have
*/
- for (first_dirty = page_index; page_index < num_of_pages; ) {
- if ( !UPL_DIRTY_PAGE(pl, page_index) && !UPL_PRECIOUS_PAGE(pl, page_index))
+ for (first_dirty = page_index;
+ page_index < num_of_pages; ) {
+ if ( !UPL_DIRTY_PAGE(pl, page_index)
+ && !UPL_PRECIOUS_PAGE(pl, page_index))
break;
page_index++;
/*
int cur_seg;
int nxt_seg;
- cur_seg = (page_index - 1) / pages_in_cl;
+ cur_seg =
+ (page_index - 1) / pages_in_cl;
nxt_seg = page_index / pages_in_cl;
if (cur_seg != nxt_seg) {
if ((ps_offset[cur_seg] != (ps_offset[nxt_seg] - cl_size)) || (psp[cur_seg] != psp[nxt_seg]))
- /*
- * if the segment we're about to step into
- * is not contiguous to the one we're currently
- * in, or it's in a different paging file....
- * we stop here and generate the I/O
- */
+ /*
+ * if the segment we're about
+ * to step into is not
+ * contiguous to the one we're
+ * currently in, or it's in a
+ * different paging file....
+ * we stop here and generate
+ * the I/O
+ */
break;
}
}
seg_offset = upl_offset - (seg_index * cl_size);
transfer_size = num_dirty * vm_page_size;
- error = ps_write_file(psp[seg_index], upl, upl_offset,
- ps_offset[seg_index] + seg_offset, transfer_size, flags);
- if (error == 0) {
- while (transfer_size) {
- int seg_size;
+ while (transfer_size) {
+ int seg_size;
- if ((seg_size = cl_size - (upl_offset % cl_size)) > transfer_size)
- seg_size = transfer_size;
+ if ((seg_size = cl_size -
+ (upl_offset % cl_size))
+ > transfer_size)
+ seg_size = transfer_size;
- ps_vs_write_complete(vs, upl->offset + upl_offset, seg_size, error);
+ ps_vs_write_complete(vs,
+ upl->offset + upl_offset,
+ seg_size, error);
- transfer_size -= seg_size;
- upl_offset += seg_size;
- }
+ transfer_size -= seg_size;
+ upl_offset += seg_size;
}
+ upl_offset = first_dirty * vm_page_size;
+ transfer_size = num_dirty * vm_page_size;
+ error = ps_write_file(psp[seg_index],
+ upl, upl_offset,
+ ps_offset[seg_index]
+ + seg_offset,
+ transfer_size, flags);
must_abort = 0;
}
if (must_abort) {
/* Assume that the caller has given us contiguous */
/* pages */
if(cnt) {
+ ps_vs_write_complete(vs, mobj_target_addr,
+ cnt, error);
error = ps_write_file(ps, internal_upl,
0, actual_offset,
cnt, flags);
if (error)
break;
- ps_vs_write_complete(vs, mobj_target_addr,
- cnt, error);
- }
+ }
if (error)
break;
actual_offset += cnt;
return KERN_SUCCESS;
}
- if ((vs->vs_seqno != vs->vs_next_seqno++) || (vs->vs_xfer_pending)) {
+ if ((vs->vs_seqno != vs->vs_next_seqno++)
+ || (vs->vs_readers)
+ || (vs->vs_xfer_pending)) {
upl_t upl;
int page_list_count = 0;
lis r0,0x4000 ; This is the address of the first segment outside of the kernel
rlwinm r5,r23,6,26,29 ; Get index into pmap table
add r4,r23,r3 ; Point to the last byte accessed
- addi r5,r5,PMAP_SEGS ; Point to the segment slot
+ addi r7,r7,PMAP_SEGS ; Point to the segment slot
cmplw r23,r0 ; See if first segment register needs to be reloaded
cmplw cr2,r4,r0 ; Do we need to set the second (if any) SR?
xor r0,r4,r23 ; See if we are in the same segment as first
#define hid0 1008 /* Checkstop and misc enables */
#define HID1 1009 /* Clock configuration */
#define hid1 1009 /* Clock configuration */
+#define HID2 1016 /* Other processor controls */
+#define hid2 1016 /* Other processor controls */
#define iabr 1010 /* Instruction address breakpoint register */
#define ictrl 1011 /* Instruction Cache Control */
#define ldstdb 1012 /* Load/Store Debug */
#define hid1pc0 0x0000F800
#define hid1pr1 0x00000006
+
+; hid2 bits
+#define hid2vmin 18
+#define hid2vminm 0x00002000
+
; msscr0 bits
#define shden 0
#define shdenm 0x80000000
void ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info);
void ml_set_processor_speed(unsigned long speed);
+void ml_set_processor_voltage(unsigned long voltage);
#endif /* __APPLE_API_PRIVATE */
sps2:
blr
+
+/*
+** ml_set_processor_voltage()
+**
+*/
+; Force a line boundary here
+ .align 5
+ .globl EXT(ml_set_processor_voltage)
+
+LEXT(ml_set_processor_voltage)
+ mfspr r4, hid2 ; Get HID2 value
+ rlwimi r4, r3, 31-hid2vmin, hid2vmin, hid2vmin ; Insert the voltage mode bit
+ mtspr hid2, r4 ; Set the voltage mode
+ sync ; Make sure it is done
+ blr
mtspr hid1, r11 ; Select the desired PLL
blr
+; 750FX vers 2.0 or later
+init750FXV2:
+ bf firstBoot, init750FXV2nb ; Wake from sleep
+
+ mfspr r11, hid2
+ stw r11, pfHID2(r30) ; Save the HID2 value
+ b init750FX ; Continue with 750FX init
+
+init750FXV2nb:
+ lwz r13, pfHID2(r30) ; Get HID2
+ rlwinm r13, r13, 0, hid2vmin+1, hid2vmin-1 ; Clear the vmin bit
+ mtspr hid2, r13 ; Restore HID2 value
+ sync ; Wait for it to be done
+ b init750FX
+
; 7400
init7400: bf firstBoot,i7400nb ; Do different if not initial boot...
.long 32*1024
.long 32*1024
+; 750FX (ver 1.x)
+
+ .align 2
+ .long 0xFFFF0F00 ; 1.x vers
+ .short PROCESSOR_VERSION_750FX
+ .short 0x0100
+ .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfSlowNap | pfNoMuMMCK | pfL1i | pfL1d | pfL2
+ .long init750FX
+ .long CPU_SUBTYPE_POWERPC_750
+ .long 105
+ .long 90
+ .long 32
+ .long 32*1024
+ .long 32*1024
+
; 750FX (generic)
.align 2
.short PROCESSOR_VERSION_750FX
.short 0
.long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfSlowNap | pfNoMuMMCK | pfL1i | pfL1d | pfL2
- .long init750FX
+ .long init750FXV2
.long CPU_SUBTYPE_POWERPC_750
.long 105
.long 90