else
proc = current_proc();
+ /* Make sure there is a process associated with this task */
+ if (proc == NULL) {
+ regs->save_r3 = (long long)EPERM;
+ /* set the "pc" so the cerror routine is executed */
+ regs->save_srr0 -= 4;
+ task_terminate_internal(current_task());
+ thread_exception_return();
+ /* NOTREACHED */
+ }
+
/*
* Delayed binding of thread credential to process credential, if we
* are not running with an explicitly set thread credential.
filefork_t * filefork;
hotfilelist_t *listp;
enum hfc_stage stage;
+ u_int32_t savedtemp;
int blksmoved;
int filesmoved;
int fileblocks;
int error = 0;
int startedtrans = 0;
+ int bt_op;
if (hfsmp->hfc_stage != HFC_EVICTION) {
return (EBUSY);
hfsmp->hfc_stage = HFC_BUSY;
filesmoved = blksmoved = 0;
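+ /* start with the coldest record; bt_op becomes kBTreeNextRecord when an entry is skipped */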
+ bt_op = kBTreeFirstRecord;
MALLOC(iterator, BTreeIterator *, sizeof(*iterator), M_TEMP, M_WAITOK);
bzero(iterator, sizeof(*iterator));
/*
* Obtain the first record (ie the coldest one).
*/
- if (BTIterateRecord(filefork, kBTreeFirstRecord, iterator, NULL, NULL) != 0) {
+ if (BTIterateRecord(filefork, bt_op, iterator, NULL, NULL) != 0) {
#if HFC_VERBOSE
printf("hotfiles_evict: no more records\n");
#endif
if (error) {
if (error == ENOENT) {
- (void) BTDeleteRecord(filefork, iterator);
- key->temperature = HFC_LOOKUPTAG;
- (void) BTDeleteRecord(filefork, iterator);
- goto next; /* stale entry, go to next */
+ goto delete; /* stale entry, go to next */
} else {
printf("hotfiles_evict: err %d getting file %d\n",
error, key->fileID);
printf("hotfiles_evict: huh, not a file %d\n", key->fileID);
hfs_unlock(VTOC(vp));
vnode_put(vp);
- (void) BTDeleteRecord(filefork, iterator);
- key->temperature = HFC_LOOKUPTAG;
- (void) BTDeleteRecord(filefork, iterator);
- goto next; /* invalid entry, go to next */
+ goto delete; /* invalid entry, go to next */
}
fileblocks = VTOF(vp)->ff_blocks;
if ((blksmoved > 0) &&
#endif
hfs_unlock(VTOC(vp));
vnode_put(vp);
- (void) BTDeleteRecord(filefork, iterator);
- key->temperature = HFC_LOOKUPTAG;
- (void) BTDeleteRecord(filefork, iterator);
- goto next; /* go to next */
+ goto delete; /* stale entry, go to next */
}
/*
printf("hotfiles_evict: err %d relocating file %d\n", error, key->fileID);
hfs_unlock(VTOC(vp));
vnode_put(vp);
+ bt_op = kBTreeNextRecord;
goto next; /* go to next */
}
listp->hfl_reclaimblks = 0;
blksmoved += fileblocks;
filesmoved++;
-
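+ /* remove the current hot file record and its corresponding thread record */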
+delete:
error = BTDeleteRecord(filefork, iterator);
if (error) {
- printf("hotfiles_evict: BTDeleteRecord failed %d (fileid %d)\n", error, key->fileID);
error = MacToVFSError(error);
break;
}
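+ /* the thread record is keyed by HFC_LOOKUPTAG, so save the temperature to restore afterwards */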
+ savedtemp = key->temperature;
key->temperature = HFC_LOOKUPTAG;
error = BTDeleteRecord(filefork, iterator);
if (error) {
- printf("hotfiles_evict: BTDeleteRecord thread failed %d (fileid %d)\n", error, key->fileID);
error = MacToVFSError(error);
break;
}
+ key->temperature = savedtemp;
next:
(void) BTFlushPath(filefork);
int hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags);
-int hfs_chflags(struct vnode *vp, uint32_t flags, kauth_cred_t cred,
- struct proc *p);
int hfs_chmod(struct vnode *vp, int mode, kauth_cred_t cred,
struct proc *p);
int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
* current securelevel are being changed.
*/
VATTR_SET_SUPPORTED(vap, va_flags);
- if (VATTR_IS_ACTIVE(vap, va_flags) &&
- ((error = hfs_chflags(vp, vap->va_flags, cred, p)) != 0))
- goto out;
+ if (VATTR_IS_ACTIVE(vap, va_flags)) {
+ cp->c_flags = vap->va_flags;
+ cp->c_touch_chgtime = TRUE;
+ }
/*
* If the file's extended security data is being changed, we
}
-
-/*
- * Change the flags on a file or directory.
- * cnode must be locked before calling.
- */
-__private_extern__
-int
-hfs_chflags(struct vnode *vp, uint32_t flags, __unused kauth_cred_t cred, __unused struct proc *p)
-{
- register struct cnode *cp = VTOC(vp);
-
- cp->c_flags &= SF_SETTABLE;
- cp->c_flags |= (flags & UF_SETTABLE);
- cp->c_touch_chgtime = TRUE;
-
- return (0);
-}
-
-
/*
* Perform chown operation on cnode cp;
* code must be locked prior to call.
MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
if (data == NULL)
return(ENOMEM);
+ /* 4108337 - copy in data for get socket option */
+ error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
}
len = sopt->sopt_valsize;
socket_unlock(so, 0);
* and increments the current iovec base address and offset value.
* If the current iovec length is 0 then advance to the next
* iovec (if any).
+ * If the a_count passed in is 0, then only advance
+ * over any 0-length iovecs.
*/
void uio_update( uio_t a_uio, user_size_t a_count )
{
}
if (UIO_IS_64_BIT_SPACE(a_uio)) {
- if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
- a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
- a_uio->uio_iovs.uiovp->iov_len = 0;
- }
- else {
- a_uio->uio_iovs.uiovp->iov_base += a_count;
- a_uio->uio_iovs.uiovp->iov_len -= a_count;
- }
+ /*
+ * if a_count == 0, then we are asking to skip over
+ * any empty iovs
+ */
+ if (a_count) {
+ if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
+ a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
+ a_uio->uio_iovs.uiovp->iov_len = 0;
+ }
+ else {
+ a_uio->uio_iovs.uiovp->iov_base += a_count;
+ a_uio->uio_iovs.uiovp->iov_len -= a_count;
+ }
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
- if (a_uio->uio_resid < 0) {
- a_uio->uio_resid = 0;
- }
- if (a_count > (user_size_t)a_uio->uio_resid) {
- a_uio->uio_offset += a_uio->uio_resid;
- a_uio->uio_resid = 0;
- }
- else {
- a_uio->uio_offset += a_count;
- a_uio->uio_resid -= a_count;
- }
+ if (a_uio->uio_resid < 0) {
+ a_uio->uio_resid = 0;
+ }
+ if (a_count > (user_size_t)a_uio->uio_resid) {
+ a_uio->uio_offset += a_uio->uio_resid;
+ a_uio->uio_resid = 0;
+ }
+ else {
+ a_uio->uio_offset += a_count;
+ a_uio->uio_resid -= a_count;
+ }
#else
- if (a_uio->uio_resid_64 < 0) {
- a_uio->uio_resid_64 = 0;
- }
- if (a_count > (user_size_t)a_uio->uio_resid_64) {
- a_uio->uio_offset += a_uio->uio_resid_64;
- a_uio->uio_resid_64 = 0;
- }
- else {
- a_uio->uio_offset += a_count;
- a_uio->uio_resid_64 -= a_count;
- }
+ if (a_uio->uio_resid_64 < 0) {
+ a_uio->uio_resid_64 = 0;
+ }
+ if (a_count > (user_size_t)a_uio->uio_resid_64) {
+ a_uio->uio_offset += a_uio->uio_resid_64;
+ a_uio->uio_resid_64 = 0;
+ }
+ else {
+ a_uio->uio_offset += a_count;
+ a_uio->uio_resid_64 -= a_count;
+ }
#endif // LP64todo
-
- /* advance to next iovec if current one is totally consumed */
- while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
+ }
+ /*
+ * advance to next iovec if current one is totally consumed
+ */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
a_uio->uio_iovcnt--;
if (a_uio->uio_iovcnt > 0) {
a_uio->uio_iovs.uiovp++;
}
}
else {
- if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
- a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
- a_uio->uio_iovs.kiovp->iov_len = 0;
- }
- else {
- a_uio->uio_iovs.kiovp->iov_base += a_count;
- a_uio->uio_iovs.kiovp->iov_len -= a_count;
- }
- if (a_uio->uio_resid < 0) {
- a_uio->uio_resid = 0;
- }
- if (a_count > (user_size_t)a_uio->uio_resid) {
- a_uio->uio_offset += a_uio->uio_resid;
- a_uio->uio_resid = 0;
- }
- else {
- a_uio->uio_offset += a_count;
- a_uio->uio_resid -= a_count;
+ /*
+ * if a_count == 0, then we are asking to skip over
+ * any empty iovs
+ */
+ if (a_count) {
+ if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
+ a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
+ a_uio->uio_iovs.kiovp->iov_len = 0;
+ }
+ else {
+ a_uio->uio_iovs.kiovp->iov_base += a_count;
+ a_uio->uio_iovs.kiovp->iov_len -= a_count;
+ }
+ if (a_uio->uio_resid < 0) {
+ a_uio->uio_resid = 0;
+ }
+ if (a_count > (user_size_t)a_uio->uio_resid) {
+ a_uio->uio_offset += a_uio->uio_resid;
+ a_uio->uio_resid = 0;
+ }
+ else {
+ a_uio->uio_offset += a_count;
+ a_uio->uio_resid -= a_count;
+ }
}
-
- /* advance to next iovec if current one is totally consumed */
- while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
+ /*
+ * advance to next iovec if current one is totally consumed
+ */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
a_uio->uio_iovcnt--;
if (a_uio->uio_iovcnt > 0) {
a_uio->uio_iovs.kiovp++;
const struct sockaddr *from,
mbuf_t *data,
mbuf_t *control,
- sflt_data_flag_t flags)
+ sflt_data_flag_t flags,
+ int *filtered)
{
struct socket_filter_entry *filter;
- int filtered = 0;
int error = 0;
+ int filtered_storage;
+
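+ /* callers that don't care whether a filter ran may pass NULL */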
+ if (filtered == NULL)
+ filtered = &filtered_storage;
+ *filtered = 0;
for (filter = so->so_filt; filter && (error == 0);
filter = filter->sfe_next_onsocket) {
if (filter->sfe_filter->sf_filter.sf_data_in) {
- if (filtered == 0) {
- filtered = 1;
+ if (*filtered == 0) {
+ *filtered = 1;
sflt_use(so);
socket_unlock(so, 0);
}
}
}
- if (filtered != 0) {
+ if (*filtered != 0) {
socket_lock(so, 0);
sflt_unuse(so);
}
#ifdef __APPLE__
selthreadclear(&so->so_snd.sb_sel);
selthreadclear(&so->so_rcv.sb_sel);
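+ /* and don't allow any further upcalls on this socket */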
+ so->so_rcv.sb_flags &= ~SB_UPCALL;
+ so->so_snd.sb_flags &= ~SB_UPCALL;
#endif
return;
}
#ifdef __APPLE__
selthreadclear(&so->so_snd.sb_sel);
selthreadclear(&so->so_rcv.sb_sel);
+ so->so_rcv.sb_flags &= ~SB_UPCALL;
+ so->so_snd.sb_flags &= ~SB_UPCALL;
#endif
socket_unlock(head, 1);
return;
register struct mbuf *n, *sb_first;
int result = 0;
int error = 0;
+ int filtered = 0;
KERNEL_DEBUG((DBG_FNC_SBAPPEND | DBG_FUNC_START), sb, m->m_len, 0, 0, 0);
if (m == 0)
return 0;
+
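+ /* restart point: a filter may drop the socket lock, invalidating the cached sb pointers */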
+again:
sb_first = n = sb->sb_mb;
if (n) {
while (n->m_nextpkt)
} while (n->m_next && (n = n->m_next));
}
- if ((sb->sb_flags & SB_RECV) != 0) {
- error = sflt_data_in(sb->sb_so, NULL, &m, NULL, 0);
+ if (!filtered && (sb->sb_flags & SB_RECV) != 0) {
+ error = sflt_data_in(sb->sb_so, NULL, &m, NULL, 0, &filtered);
if (error) {
/* no data was appended, caller should not call sowakeup */
return 0;
}
- }
-
- /* 3962537 - sflt_data_in may drop the lock, need to validate state again */
- if (sb_first != sb->sb_mb) {
- n = sb->sb_mb;
- if (n) {
- while (n->m_nextpkt)
- n = n->m_nextpkt;
- }
+
+ /*
+ If any filters ran, the socket lock was dropped, so the data
+ that n and sb_first cached from the socket buffer is no longer
+ valid and we must start over. Since filtered is now set we
+ won't run through the filters a second time; we just recompute
+ n and sb_first.
+ */
+ if (filtered)
+ goto again;
}
result = sbcompress(sb, m, n);
return 0;
if ((sb->sb_flags & SB_RECV) != 0) {
- int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL, sock_data_filt_flag_record);
+ int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL, sock_data_filt_flag_record, NULL);
if (error != 0) {
if (error != EJUSTRETURN)
m_freem(m0);
if ((sb->sb_flags & SB_RECV) != 0) {
int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
- sock_data_filt_flag_oob);
+ sock_data_filt_flag_oob, NULL);
if (error) {
if (error != EJUSTRETURN) {
/* Call socket data in filters */
if ((sb->sb_flags & SB_RECV) != 0) {
int error;
- error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
+ error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0, NULL);
if (error) {
if (error != EJUSTRETURN) {
if (m0) m_freem(m0);
if (sb->sb_flags & SB_RECV) {
int error;
- error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
+ error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0, NULL);
if (error) {
if (error != EJUSTRETURN) {
if (m0) m_freem(m0);
context.vc_proc = p;
context.vc_ucred = p->p_ucred; /* XXX kauth_cred_get() ??? proxy */
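+ /* start with NULL so the error paths can test these safely */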
+ so2 = so3 = NULL;
len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
if (len <= 0)
if (m->m_len < sizeof(*ip6)) {
m = m_pullup(m, sizeof(*ip6));
- if (!m)
+ if (!m) {
+ *m0 = NULL; /* makes sure this won't be double freed */
return ENOBUFS;
+ }
}
ip6 = mtod(m, struct ip6_hdr *);
tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
if (m && m->m_len < sizeof(struct ip))
m = m_pullup(m, sizeof(struct ip));
- if (m == NULL)
+ if (m == NULL) {
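+ /* makes sure this won't be double freed */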
+ *m0 = NULL;
return ENOBUFS;
+ }
ip = mtod(m, struct ip *);
bzero(ip, sizeof(*ip));
if (error == 0)
return EJUSTRETURN;
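+ /* makes sure the mbuf won't be double freed by the caller */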
+ *m0 = NULL;
return error;
}
void sflt_unuse(struct socket *so);
void sflt_notify(struct socket *so, sflt_event_t event, void *param);
int sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
- mbuf_t *control, sflt_data_flag_t flags);
+ mbuf_t *control, sflt_data_flag_t flags, int *filtered);
int sflt_attach_private(struct socket *so, struct socket_filter *filter, sflt_handle handle, int locked);
void sflt_detach_private(struct socket_filter_entry *entry, int filter_detached);
w.w_arg = name[2];
w.w_req = req;
- lck_mtx_lock(rt_mtx);
switch (w.w_op) {
case NET_RT_DUMP:
case NET_RT_DUMP2:
case NET_RT_FLAGS:
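+ /* take rt_mtx just for the routing tree walk */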
+ lck_mtx_lock(rt_mtx);
for (i = 1; i <= AF_MAX; i++)
if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
(error = rnh->rnh_walktree(rnh,
sysctl_dumpentry, &w)))
break;
+ lck_mtx_unlock(rt_mtx);
break;
case NET_RT_IFLIST:
error = sysctl_iflist(af, &w);
error = sysctl_rttrash(req);
break;
}
- lck_mtx_unlock(rt_mtx);
if (w.w_tmem)
FREE(w.w_tmem, M_RTABLE);
return (error);
return err;
}
-int _ATrw(fp, rw, uio, ext)
+int _ATrw(fp, rw, uio, p)
void *fp;
enum uio_rw rw;
struct uio *uio;
- int ext;
+ struct proc *p;
{
int s, err, len, clen = 0, res;
gref_t *gref;
gbuf_t *m, *mhead, *mprev;
/* no need to get/drop iocount as the fp already has one */
- if ((err = atalk_getref_locked(fp, 0, &gref, 0, 1)) != 0)
+ if ((err = atalk_getref_locked(fp, 0, &gref, p, 1)) != 0)
return err;
// LP64todo - fix this!
int stat;
atalk_lock();
- stat = _ATrw(fp, UIO_READ, uio, 0);
+ stat = _ATrw(fp, UIO_READ, uio, p);
atalk_unlock();
return stat;
}
int stat;
atalk_lock();
- stat = _ATrw(fp, UIO_WRITE, uio, 0);
+ stat = _ATrw(fp, UIO_WRITE, uio, p);
atalk_unlock();
return stat;
int s, err, rc = 0;
gref_t *gref;
+ /* Radar 4128949: Drop the proc_fd lock here to avoid lock inversion issues with the other AT calls.
+ * select() is already holding a reference on the fd, so it won't go away during the time it is unlocked.
+ */
+ proc_fdunlock(proc);
+
atalk_lock();
/* no need to drop the iocount as select covers that */
err = atalk_getref_locked(fp, 0, &gref, 0, 0);
atalk_unlock();
+ /* Safe to re-grab the proc_fdlock at that point */
+ proc_fdlock(proc);
if (err != 0)
rc = 1;
else {
*/
if (ia == (struct in_ifaddr *)0) {
ia = in_ifaddrhead.tqh_first;
+ if (ia == (struct in_ifaddr *)0) {/* no address yet, bail out */
+ m_freem(m);
+ lck_mtx_unlock(rt_mtx);
+ goto done;
+ }
ifaref(&ia->ia_ifa);
}
lck_mtx_unlock(rt_mtx);
* locate outgoing interface; if we're the destination,
* use the incoming interface (should be same).
*/
- if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) != 0) {
+ if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0) {
if ((ia = ip_rtaddr(ipaddr.sin_addr, ipforward_rt)) == 0) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_HOST;
mbuf_t mrep = nfsd->nd_mrep, md = nfsd->nd_md;
mbuf_t nam = nfsd->nd_nam;
caddr_t dpos = nfsd->nd_dpos;
- vnode_t vp, dvp;
+ vnode_t vp;
struct nfs_filehandle nfh;
u_long *tl;
long t1;
if (nfsrv_authorize(vp, NULL, testaction, &context, nxo, 0))
nfsmode &= ~NFSV3ACCESS_EXTEND;
}
- dvp = NULLVP;
+
/*
- * For hard links, this answer may be wrong if the vnode
+ * Note concerning NFSV3ACCESS_DELETE:
+ * For hard links, the answer may be wrong if the vnode
* has multiple parents with different permissions.
+ * Also, some clients (e.g. MacOSX 10.3) may incorrectly
+ * interpret the missing/cleared DELETE bit.
+ * So we'll just leave the DELETE bit alone. At worst,
+ * we're telling the client it might be able to do
+ * something it really can't.
*/
- if ((nfsmode & NFSV3ACCESS_DELETE) &&
- (((dvp = vnode_getparent(vp)) == NULL) ||
- nfsrv_authorize(vp, dvp, KAUTH_VNODE_DELETE, &context, nxo, 0))) {
- nfsmode &= ~NFSV3ACCESS_DELETE;
- }
- if (dvp != NULLVP)
- vnode_put(dvp);
if ((nfsmode & NFSV3ACCESS_EXECUTE) &&
(vnode_isdir(vp) ||
VATTR_SET(vap, va_gid, kauth_cred_getgid(nfsd->nd_cr));
}
VATTR_SET(vap, va_type, VLNK);
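+ /* don't pass a data size or access time down when creating the symlink */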
+ VATTR_CLEAR_ACTIVE(vap, va_data_size);
+ VATTR_CLEAR_ACTIVE(vap, va_access_time);
/* authorize before creating */
error = nfsrv_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, &context, nxo, 0);
#include <sys/kdebug.h>
-
#define CL_READ 0x01
#define CL_ASYNC 0x02
#define CL_COMMIT 0x04
buf_biowait(cbp);
if ((error = cluster_iodone(cbp_head, (void *)&dummy))) {
- if ((flags & (CL_PAGEOUT | CL_KEEPCACHED) == CL_PAGEOUT) && (error == ENXIO))
+ if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) == CL_PAGEOUT) && (error == ENXIO))
error = 0; /* drop the error */
else {
if (retval == 0)
#endif /* LP64_DEBUG */
while (uio_resid(uio) && uio->uio_offset < newEOF && retval == 0) {
- u_int64_t iov_len;
- u_int64_t iov_base;
+ user_size_t iov_len;
+ user_addr_t iov_base;
/*
* we know we have a resid, so this is safe
* skip over any empty vectors
*/
- iov_len = uio_iov_len(uio);
+ uio_update(uio, (user_size_t)0);
- while (iov_len == 0) {
- uio_next_iov(uio);
- uio->uio_iovcnt--;
- iov_len = uio_iov_len(uio);
- }
- iov_base = uio_iov_base(uio);
+ iov_len = uio_curriovlen(uio);
+ iov_base = uio_curriovbase(uio);
upl_size = PAGE_SIZE;
upl_flags = UPL_QUERY_OBJECT_TYPE;
// LP64todo - fix this!
if ((vm_map_get_upl(current_map(),
- CAST_DOWN(vm_offset_t, iov_base) & ~PAGE_MASK,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
/*
* the user app must have passed in an invalid address
int error = 0;
struct clios iostate;
struct cl_writebehind *wbp;
- struct iovec *iov;
+
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
(int)uio->uio_offset, (int)uio_resid(uio),
iostate.io_error = 0;
iostate.io_wanted = 0;
- iov = uio->uio_iov;
-
while (uio_resid(uio) && uio->uio_offset < newEOF && error == 0) {
+ user_addr_t iov_base;
+
io_size = uio_resid(uio);
if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ iov_base = uio_curriovbase(uio);
+
// LP64todo - fix this!
- upl_offset = CAST_DOWN(vm_offset_t, iov->iov_base) & PAGE_MASK;
+ upl_offset = CAST_DOWN(vm_offset_t, iov_base) & PAGE_MASK;
upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
- (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
+ (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
pages_in_pl = 0;
// LP64todo - fix this!
kret = vm_map_get_upl(current_map(),
- CAST_DOWN(vm_offset_t, iov->iov_base) & ~PAGE_MASK,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size,
&upl,
NULL,
io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
- (int)upl_offset, upl_size, (int)iov->iov_base, io_size, 0);
+ (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
if (io_size == 0) {
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
io_size, io_flag, (buf_t)NULL, &iostate);
- iov->iov_len -= io_size;
- ((u_int32_t)iov->iov_base) += io_size;
- uio_setresid(uio, (uio_resid(uio) - io_size));
- uio->uio_offset += io_size;
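+ /* uio_update advances iov_base/iov_len and adjusts uio_resid and uio_offset in one call */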
+ uio_update(uio, (user_size_t)io_size);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
(int)upl_offset, (int)uio->uio_offset, (int)uio_resid(uio), error, 0);
int upl_flags;
kern_return_t kret;
int error = 0;
- u_int64_t iov_base;
+ user_addr_t iov_base;
int devblocksize;
struct cl_writebehind *wbp;
#endif /* LP64_DEBUG */
// LP64todo - fix this!
- io_size = uio_iov_len(uio);
- iov_base = uio_iov_base(uio);
+ io_size = (int)uio_curriovlen(uio);
+ iov_base = uio_curriovbase(uio);
+
upl_offset = CAST_DOWN(upl_offset_t, iov_base) & PAGE_MASK;
upl_needed_size = upl_offset + io_size;
// LP64todo - fix this!
kret = vm_map_get_upl(current_map(),
- CAST_DOWN(upl_offset_t, iov_base) & ~PAGE_MASK,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
if (kret != KERN_SUCCESS) {
}
pl = ubc_upl_pageinfo(upl);
- src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)(iov_base & PAGE_MASK));
+ src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
int head_size;
* The cluster_io write completed successfully,
* update the uio structure
*/
- uio_setresid(uio, (uio_resid(uio) - io_size));
- uio_iov_len_add(uio, -io_size);
- uio_iov_base_add(uio, io_size);
- uio->uio_offset += io_size;
- src_paddr += io_size;
+ uio_update(uio, (user_size_t)io_size);
+
+ src_paddr += io_size;
if (tail_size)
error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0);
#endif /* LP64_DEBUG */
while (uio_resid(uio) && uio->uio_offset < filesize && retval == 0) {
- u_int64_t iov_len;
- u_int64_t iov_base;
+ user_size_t iov_len;
+ user_addr_t iov_base;
/*
* we know we have a resid, so this is safe
* skip over any empty vectors
*/
- iov_len = uio_iov_len(uio);
+ uio_update(uio, (user_size_t)0);
+
+ iov_len = uio_curriovlen(uio);
+ iov_base = uio_curriovbase(uio);
- while (iov_len == 0) {
- uio_next_iov(uio);
- uio->uio_iovcnt--;
- iov_len = uio_iov_len(uio);
- }
- iov_base = uio_iov_base(uio);
upl_size = PAGE_SIZE;
upl_flags = UPL_QUERY_OBJECT_TYPE;
// LP64todo - fix this!
if ((vm_map_get_upl(current_map(),
- CAST_DOWN(vm_offset_t, iov_base) & ~PAGE_MASK,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
/*
* the user app must have passed in an invalid address
int pages_in_pl;
int upl_flags;
kern_return_t kret;
- struct iovec *iov;
int i;
int force_data_sync;
int retval = 0;
iostate.io_error = 0;
iostate.io_wanted = 0;
- iov = uio->uio_iov;
-
while (uio_resid(uio) && uio->uio_offset < filesize && retval == 0) {
+ user_addr_t iov_base;
if (cluster_hard_throttle_on(vp)) {
max_rd_size = HARD_THROTTLE_MAXSIZE;
max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
} else {
max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE;
- max_rd_ahead = MAX_UPL_TRANSFER * PAGE_SIZE * 2;
+ max_rd_ahead = MAX_UPL_TRANSFER * PAGE_SIZE * 8;
}
max_io_size = filesize - uio->uio_offset;
* to complete before returning
*/
goto wait_for_reads;
+
+ iov_base = uio_curriovbase(uio);
// LP64todo - fix this!
- upl_offset = CAST_DOWN(vm_offset_t, iov->iov_base) & PAGE_MASK;
+ upl_offset = CAST_DOWN(vm_offset_t, iov_base) & PAGE_MASK;
upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
- (int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
+ (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
no_zero_fill = 1;
// LP64todo - fix this!
kret = vm_map_create_upl(current_map(),
- (vm_map_offset_t)(CAST_DOWN(vm_offset_t, iov->iov_base) & ~PAGE_MASK),
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
if (kret != KERN_SUCCESS) {
/*
* update the uio structure
*/
- ((u_int32_t)iov->iov_base) += io_size;
- iov->iov_len -= io_size;
- uio_setresid(uio, (uio_resid(uio) - io_size));
- uio->uio_offset += io_size;
+ uio_update(uio, (user_size_t)io_size);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
(int)upl, (int)uio->uio_offset, (int)uio_resid(uio), retval, 0);
vm_offset_t upl_offset;
addr64_t dst_paddr;
off_t max_size;
-#if LP64KERN
- int64_t io_size;
- u_int64_t iov_len;
- u_int64_t iov_base;
-#else
- int io_size;
- uint iov_len;
- uint iov_base;
-#endif
+ int io_size;
+ user_size_t iov_len;
+ user_addr_t iov_base;
int tail_size;
int upl_size;
int upl_needed_size;
}
#endif /* LP64_DEBUG */
- iov_len = uio_iov_len(uio);
- iov_base = uio_iov_base(uio);
+ iov_len = uio_curriovlen(uio);
+ iov_base = uio_curriovbase(uio);
max_size = filesize - uio->uio_offset;
upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
kret = vm_map_get_upl(current_map(),
- CAST_DOWN(vm_offset_t, iov_base) & ~PAGE_MASK,
+ (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
if (kret != KERN_SUCCESS) {
}
pl = ubc_upl_pageinfo(upl);
- dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)(iov_base & PAGE_MASK));
+ dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
int head_size;
*/
lck_mtx_lock(cl_mtxp);
- while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+ while ((iostate.io_issued - iostate.io_completed) > (8 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_phys_read", 0);
}
* update the uio structure
*/
if (error == 0) {
- uio_setresid(uio, (uio_resid(uio) - xsize));
- uio_iov_base_add(uio, xsize);
- uio_iov_len_add(uio, -xsize);
- uio->uio_offset += xsize;
- dst_paddr += xsize;
- upl_offset += xsize;
- io_size -= xsize;
+ uio_update(uio, (user_size_t)xsize);
+
+ dst_paddr += xsize;
+ upl_offset += xsize;
+ io_size -= xsize;
}
}
/*
static int
cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, int xsize, int flags)
{
- struct iovec *iov;
upl_page_info_t *pl;
upl_t upl;
addr64_t ubc_paddr;
int abort_flags;
int upl_flags;
- iov = uio->uio_iov;
-
upl_flags = UPL_SET_LITE;
if (! (flags & CL_READ)) {
/*
error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
0, (buf_t)NULL, (struct clios *)NULL);
}
- if (error == 0) {
- uio->uio_offset += xsize;
- uio_iov_base_add(uio, xsize);
- uio_iov_len_add(uio, -xsize);
- uio_setresid(uio, (uio_resid(uio) - xsize));
- }
+ if (error == 0)
+ uio_update(uio, (user_size_t)xsize);
+
if (did_read)
abort_flags = UPL_ABORT_FREE_ON_EMPTY;
else
_buf_callback
_buf_clear
_buf_clearflags
+_buf_clone
_buf_count
_buf_dataptr
_buf_device
-8.1.0
+8.2.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
vm_size_t iopl_size = size;
kr = vm_map_get_upl(kernel_map,
- startUpl,
+ (vm_map_offset_t)startUpl,
&iopl_size,
&iopl,
0,
extern kern_return_t vm_map_get_upl(
vm_map_t target_map,
- vm_address_t address,
+ vm_map_offset_t map_offset,
vm_size_t *size,
upl_t *upl,
upl_page_info_array_t page_info,
kern_return_t
vm_map_get_upl(
vm_map_t map,
- vm_address_t offset,
+ vm_map_offset_t map_offset,
upl_size_t *upl_size,
upl_t *upl,
upl_page_info_array_t page_list,
int *flags,
int force_data_sync)
{
- vm_map_offset_t map_offset;
int map_flags;
kern_return_t kr;
if (VM_MAP_NULL == map)
return KERN_INVALID_ARGUMENT;
- map_offset = (vm_map_offset_t)offset;
map_flags = *flags & ~UPL_NOZEROFILL;
if (force_data_sync)
map_flags |= UPL_FORCE_DATA_SYNC;