From 743b15655a24ee3fe9f458f383003e011db0558f Mon Sep 17 00:00:00 2001 From: Apple Date: Tue, 4 Apr 2006 04:12:34 +0000 Subject: [PATCH] xnu-792.6.70.tar.gz --- bsd/dev/ppc/munge.s | 2 +- bsd/hfs/hfs.h | 1 + bsd/hfs/hfs_attrlist.c | 18 +++- bsd/hfs/hfs_catalog.c | 13 +-- bsd/hfs/hfs_readwrite.c | 1 + bsd/hfs/hfs_vfsops.c | 40 +++++---- bsd/hfs/hfs_vfsutils.c | 56 +++++++++++- bsd/hfs/hfs_vnops.c | 22 ++++- bsd/hfs/hfs_xattr.c | 21 ++++- bsd/hfs/hfscommon/Misc/VolumeAllocation.c | 21 +++-- bsd/kern/kern_proc.c | 7 +- bsd/kern/kern_subr.c | 21 ++++- bsd/kern/tty_pty.c | 1 + bsd/kern/uipc_socket2.c | 1 + bsd/netinet/ip_output.c | 4 +- bsd/netinet/tcp_input.c | 25 ++++-- bsd/nfs/nfs.h | 4 +- bsd/nfs/nfs_serv.c | 15 ++-- bsd/nfs/nfs_socket.c | 6 ++ bsd/nfs/nfs_subs.c | 4 +- bsd/nfs/nfs_syscalls.c | 33 ++++--- bsd/vfs/vfs_cache.c | 28 +++++- bsd/vfs/vfs_journal.c | 104 ++++++++++++++++++++++ bsd/vfs/vfs_journal.h | 13 +++ bsd/vfs/vfs_syscalls.c | 12 ++- bsd/vfs/vfs_xattr.c | 10 ++- config/MasterVersion | 2 +- libkern/libkern/OSCrossEndian.h | 8 +- 28 files changed, 397 insertions(+), 96 deletions(-) diff --git a/bsd/dev/ppc/munge.s b/bsd/dev/ppc/munge.s index 6c835dddd..0f4e09acf 100644 --- a/bsd/dev/ppc/munge.s +++ b/bsd/dev/ppc/munge.s @@ -17,7 +17,7 @@ * License for the specific language governing rights and limitations * under the License. * - * @APPLE_LICENSE_HEADER_EN + * @APPLE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/hfs/hfs.h b/bsd/hfs/hfs.h index e238dbb08..4e3e3370b 100644 --- a/bsd/hfs/hfs.h +++ b/bsd/hfs/hfs.h @@ -302,6 +302,7 @@ typedef struct filefork FCB; #define MAKE_INODE_NAME(name,linkno) \ (void) sprintf((name), "%s%d", HFS_INODE_PREFIX, (linkno)) +#define HFS_INODE_PREFIX_LEN 5 #define HFS_AVERAGE_NAME_SIZE 22 diff --git a/bsd/hfs/hfs_attrlist.c b/bsd/hfs/hfs_attrlist.c index c3d29a9e8..8e15541b6 100644 --- a/bsd/hfs/hfs_attrlist.c +++ b/bsd/hfs/hfs_attrlist.c @@ -690,7 +690,7 @@ hfs_vnop_readdirattr(ap) } dir_entries = dcp->c_entries; - if (dcp->c_attr.ca_fileid == kHFSRootFolderID && hfsmp->jnl) { + if (dcp->c_attr.ca_fileid == kHFSRootFolderID && (hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))) { dir_entries -= 3; } @@ -887,7 +887,12 @@ hfs_vnop_readdirattr(ap) /* Make sure dcp is locked exclusive before changing c_dirhinttag. */ if (shared_cnode_lock) { - lck_rw_lock_shared_to_exclusive(&dcp->c_rwlock); + /* + * If the upgrade fails we loose the lock and + * have to take the exclusive lock on our own. + */ + if (lck_rw_lock_shared_to_exclusive(&dcp->c_rwlock) != 0) + lck_rw_lock_exclusive(&dcp->c_rwlock); dcp->c_lockowner = current_thread(); shared_cnode_lock = 0; } @@ -901,7 +906,12 @@ exit: /* Drop directory hint on error or if there are no more entries */ if (dirhint && (error || index >= dir_entries)) { if (shared_cnode_lock) { - lck_rw_lock_shared_to_exclusive(&dcp->c_rwlock); + /* + * If the upgrade fails we loose the lock and + * have to take the exclusive lock on our own. 
+ */ + if (lck_rw_lock_shared_to_exclusive(&dcp->c_rwlock) != 0) + lck_rw_lock_exclusive(&dcp->c_rwlock); dcp->c_lockowner = current_thread(); } hfs_reldirhint(dcp, dirhint); @@ -1614,7 +1624,7 @@ packdirattr( if (descp->cd_parentcnid == kHFSRootParentID) { if (hfsmp->hfs_privdir_desc.cd_cnid != 0) --entries; /* hide private dir */ - if (hfsmp->jnl) + if (hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) entries -= 2; /* hide the journal files */ } diff --git a/bsd/hfs/hfs_catalog.c b/bsd/hfs/hfs_catalog.c index d21b4c4e0..bb4e69620 100644 --- a/bsd/hfs/hfs_catalog.c +++ b/bsd/hfs/hfs_catalog.c @@ -596,7 +596,7 @@ cat_lookupbykey(struct hfsmount *hfsmp, CatalogKey *keyp, u_long hint, int wantr hint = iterator->hint.nodeNum; /* Hide the journal files (if any) */ - if (hfsmp->jnl && + if ((hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) && ((cnid == hfsmp->hfs_jnlfileid) || (cnid == hfsmp->hfs_jnlinfoblkid))) { @@ -1022,11 +1022,14 @@ cat_rename ( /* Find cnode data at new location */ result = BTSearchRecord(fcb, to_iterator, &btdata, &datasize, NULL); + if (result) + goto exit; if ((fromtype != recp->recordType) || - (from_cdp->cd_cnid != getcnid(recp))) + (from_cdp->cd_cnid != getcnid(recp))) { + result = EEXIST; goto exit; /* EEXIST */ - + } /* The old name is a case variant and must be removed */ result = BTDeleteRecord(fcb, from_iterator); if (result) @@ -1563,7 +1566,7 @@ cat_readattr(const CatalogKey *key, const CatalogRecord *rec, (rec->hfsPlusFolder.folderID == hfsmp->hfs_privdir_desc.cd_cnid)) { return (1); /* continue */ } - if (hfsmp->jnl && + if ((hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) && (rec->recordType == kHFSPlusFileRecord) && ((rec->hfsPlusFile.fileID == hfsmp->hfs_jnlfileid) || (rec->hfsPlusFile.fileID == hfsmp->hfs_jnlinfoblkid))) { @@ -1879,7 +1882,7 @@ cat_packdirentry(const CatalogKey *ckp, const CatalogRecord *crp, cnid = crp->hfsPlusFile.fileID; /* Hide the journal files */ if ((curID == kHFSRootFolderID) && - (hfsmp->jnl) && + ((hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))) && ((cnid == hfsmp->hfs_jnlfileid) || (cnid == hfsmp->hfs_jnlinfoblkid))) { hide = 1; diff --git a/bsd/hfs/hfs_readwrite.c b/bsd/hfs/hfs_readwrite.c index 500933247..7fc125ffa 100644 --- a/bsd/hfs/hfs_readwrite.c +++ b/bsd/hfs/hfs_readwrite.c @@ -1158,6 +1158,7 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* { goto err_exit_bulk_access; } myucred.cr_rgid = myucred.cr_svgid = myucred.cr_groups[0]; + myucred.cr_gmuid = myucred.cr_uid; my_context.vc_proc = p; my_context.vc_ucred = &myucred; diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c index 7ebe8aff7..ba4a5983d 100644 --- a/bsd/hfs/hfs_vfsops.c +++ b/bsd/hfs/hfs_vfsops.c @@ -548,7 +548,7 @@ hfs_reload_callback(struct vnode *vp, void *cargs) /* * Re-read cnode data for all active vnodes (non-metadata files). 
*/ - if (!VNODE_IS_RSRC(vp)) { + if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp)) { struct cat_fork *datafork; struct cat_desc desc; @@ -591,7 +591,6 @@ hfs_reload(struct mount *mountp, kauth_cred_t cred, struct proc *p) struct filefork *forkp; struct cat_desc cndesc; struct hfs_reload_cargs args; - int lockflags; hfsmp = VFSTOHFS(mountp); vcb = HFSTOVCB(hfsmp); @@ -617,9 +616,7 @@ hfs_reload(struct mount *mountp, kauth_cred_t cred, struct proc *p) * the vnode will be in an 'unbusy' state (VNODE_WAIT) and * properly referenced and unreferenced around the callback */ - lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args); - hfs_systemfile_unlock(hfsmp, lockflags); if (args.error) return (args.error); @@ -2163,6 +2160,7 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) struct cat_attr cnattr; struct cat_fork cnfork; struct componentname cn; + u_int32_t linkref = 0; int error; /* Check for cnids that should't be exported. */ @@ -2215,18 +2213,19 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) return (error); } - /* Hide open files that have been deleted */ - if ((hfsmp->hfs_privdir_desc.cd_cnid != 0) && - (cndesc.cd_parentcnid == hfsmp->hfs_privdir_desc.cd_cnid)) { - // XXXdbg - if this is a hardlink, we could call - // hfs_chash_snoop() to see if there is - // already a cnode and vnode present for - // this fileid. however I'd rather not - // risk it at this point in Tiger. - cat_releasedesc(&cndesc); - error = ENOENT; - *vpp = NULL; - return (error); + /* + * If we just looked up a raw hardlink inode, + * then finish initializing it. + */ + if ((cndesc.cd_parentcnid == hfsmp->hfs_privdir_desc.cd_cnid) && + (bcmp(cndesc.cd_nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) { + linkref = strtoul((const char*)&cndesc.cd_nameptr[HFS_INODE_PREFIX_LEN], NULL, 10); + cnattr.ca_rdev = linkref; + + // patch up the parentcnid + if (cnattr.ca_attrblks != 0) { + cndesc.cd_parentcnid = cnattr.ca_attrblks; + } } } @@ -2246,6 +2245,10 @@ hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock) /* XXX should we supply the parent as well... ? 
*/ error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, &cnfork, &vp); + if (error == 0 && linkref != 0) { + VTOC(vp)->c_flag |= C_HARDLINK; + } + FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI); cat_releasedesc(&cndesc); @@ -3277,7 +3280,6 @@ hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk) if (VTOF(vp)->ff_blocks > 0) { error = hfs_relocate(vp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc()); } - hfs_unlock(VTOC(vp)); if (error) break; @@ -3286,17 +3288,17 @@ hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk) error = hfs_vgetrsrc(hfsmp, vp, &rvp, current_proc()); if (error) break; - hfs_lock(VTOC(rvp), HFS_EXCLUSIVE_LOCK); error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc()); - hfs_unlock(VTOC(rvp)); vnode_put(rvp); if (error) break; } + hfs_unlock(VTOC(vp)); vnode_put(vp); vp = NULL; } if (vp) { + hfs_unlock(VTOC(vp)); vnode_put(vp); vp = NULL; } diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c index 4eed699c9..4336e8732 100644 --- a/bsd/hfs/hfs_vfsutils.c +++ b/bsd/hfs/hfs_vfsutils.c @@ -562,7 +562,7 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, // if ( (vcb->vcbAtrb & kHFSVolumeJournaledMask) && (SWAP_BE32(vhp->lastMountedVersion) != kHFSJMountVersion) - && (hfsmp->jnl == NULL)) { + && (hfsmp->jnl == NULL)) { retval = hfs_late_journal_init(hfsmp, vhp, args); if (retval != 0) { @@ -604,9 +604,13 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, } else if (hfsmp->jnl) { vfs_setflags(hfsmp->hfs_mp, (uint64_t)((unsigned int)MNT_JOURNALED)); } - } else if (hfsmp->jnl) { + } else if (hfsmp->jnl || ((vcb->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) { struct cat_attr jinfo_attr, jnl_attr; + if (hfsmp->hfs_flags & HFS_READ_ONLY) { + vcb->vcbAtrb &= ~kHFSVolumeJournaledMask; + } + // if we're here we need to fill in the fileid's for the // journal and journal_info_block. hfsmp->hfs_jnlinfoblkid = GetFileInfo(vcb, kRootDirID, ".journal_info_block", &jinfo_attr, NULL); @@ -615,6 +619,10 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, printf("hfs: danger! couldn't find the file-id's for the journal or journal_info_block\n"); printf("hfs: jnlfileid %d, jnlinfoblkid %d\n", hfsmp->hfs_jnlfileid, hfsmp->hfs_jnlinfoblkid); } + + if (hfsmp->hfs_flags & HFS_READ_ONLY) { + vcb->vcbAtrb |= kHFSVolumeJournaledMask; + } } /* @@ -1728,6 +1736,28 @@ hfs_early_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, hfsmp->jnl_start = jibp->offset / SWAP_BE32(vhp->blockSize); hfsmp->jnl_size = jibp->size; + if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (vfs_flags(hfsmp->hfs_mp) & MNT_ROOTFS) == 0) { + // if the file system is read-only, check if the journal is empty. + // if it is, then we can allow the mount. otherwise we have to + // return failure. + retval = journal_is_clean(hfsmp->jvp, + jibp->offset + embeddedOffset, + jibp->size, + devvp, + hfsmp->hfs_phys_block_size); + + hfsmp->jnl = NULL; + + buf_brelse(jinfo_bp); + + if (retval) { + printf("hfs: early journal init: volume on %s is read-only and journal is dirty. 
Can not mount volume.\n", + vnode_name(devvp)); + } + + return retval; + } + if (jibp->flags & kJIJournalNeedInitMask) { printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", jibp->offset + embeddedOffset, jibp->size); @@ -1915,6 +1945,28 @@ hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_a hfsmp->jnl_start = jibp->offset / SWAP_BE32(vhp->blockSize); hfsmp->jnl_size = jibp->size; + if ((hfsmp->hfs_flags & HFS_READ_ONLY) && (vfs_flags(hfsmp->hfs_mp) & MNT_ROOTFS) == 0) { + // if the file system is read-only, check if the journal is empty. + // if it is, then we can allow the mount. otherwise we have to + // return failure. + retval = journal_is_clean(hfsmp->jvp, + jibp->offset + (off_t)vcb->hfsPlusIOPosOffset, + jibp->size, + devvp, + hfsmp->hfs_phys_block_size); + + hfsmp->jnl = NULL; + + buf_brelse(jinfo_bp); + + if (retval) { + printf("hfs: late journal init: volume on %s is read-only and journal is dirty. Can not mount volume.\n", + vnode_name(devvp)); + } + + return retval; + } + if (jibp->flags & kJIJournalNeedInitMask) { printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n", jibp->offset + (off_t)vcb->hfsPlusIOPosOffset, jibp->size); diff --git a/bsd/hfs/hfs_vnops.c b/bsd/hfs/hfs_vnops.c index 59c278f10..507bb153c 100644 --- a/bsd/hfs/hfs_vnops.c +++ b/bsd/hfs/hfs_vnops.c @@ -289,7 +289,7 @@ hfs_vnop_getattr(struct vnop_getattr_args *ap) if (vnode_isvroot(vp)) { if (hfsmp->hfs_privdir_desc.cd_cnid != 0) --entries; /* hide private dir */ - if (hfsmp->jnl) + if (hfsmp->jnl || ((HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) entries -= 2; /* hide the journal files */ } VATTR_RETURN(vap, va_nlink, (uint64_t)entries); @@ -1890,6 +1890,10 @@ out: __private_extern__ void replace_desc(struct cnode *cp, struct cat_desc *cdp) { + if (&cp->c_desc == cdp) { + return; + } + /* First release allocated name buffer */ if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) { char *name = cp->c_desc.cd_nameptr; @@ -3167,6 +3171,22 @@ hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, __un struct componentname cn; int lockflags; + /* + * Make sure cnode lock is exclusive, if not upgrade it. + * + * We assume that we were called from a read-only VNOP (getattr) + * and that its safe to have the cnode lock dropped and reacquired. + */ + if (cp->c_lockowner != current_thread()) { + /* + * If the upgrade fails we loose the lock and + * have to take the exclusive lock on our own. 
+ */ + if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) != 0) + lck_rw_lock_exclusive(&cp->c_rwlock); + cp->c_lockowner = current_thread(); + } + lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); /* Get resource fork data */ diff --git a/bsd/hfs/hfs_xattr.c b/bsd/hfs/hfs_xattr.c index 5030db050..1e2a11550 100644 --- a/bsd/hfs/hfs_xattr.c +++ b/bsd/hfs/hfs_xattr.c @@ -145,7 +145,12 @@ hfs_vnop_getxattr(struct vnop_getxattr_args *ap) if ( !RESOURCE_FORK_EXISTS(vp)) { return (ENOATTR); } - if ((result = hfs_vgetrsrc(hfsmp, vp, &rvp, vfs_context_proc(ap->a_context)))) { + if ((result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) { + return (result); + } + result = hfs_vgetrsrc(hfsmp, vp, &rvp, vfs_context_proc(ap->a_context)); + hfs_unlock(VTOC(vp)); + if (result) { return (result); } if (uio == NULL) { @@ -292,7 +297,12 @@ hfs_vnop_setxattr(struct vnop_setxattr_args *ap) return (ENOATTR); } } - if ((result = hfs_vgetrsrc(hfsmp, vp, &rvp, vfs_context_proc(ap->a_context)))) { + if ((result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) { + return (result); + } + result = hfs_vgetrsrc(hfsmp, vp, &rvp, vfs_context_proc(ap->a_context)); + hfs_unlock(VTOC(vp)); + if (result) { return (result); } result = VNOP_WRITE(rvp, uio, 0, ap->a_context); @@ -468,7 +478,12 @@ hfs_vnop_removexattr(struct vnop_removexattr_args *ap) if ( !RESOURCE_FORK_EXISTS(vp) ) { return (ENOATTR); } - if ((result = hfs_vgetrsrc(hfsmp, vp, &rvp, p))) { + if ((result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) { + return (result); + } + result = hfs_vgetrsrc(hfsmp, vp, &rvp, p); + hfs_unlock(VTOC(vp)); + if (result) { return (result); } hfs_lock_truncate(VTOC(rvp), TRUE); diff --git a/bsd/hfs/hfscommon/Misc/VolumeAllocation.c b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c index 157b7fb57..c5007ac7f 100644 --- a/bsd/hfs/hfscommon/Misc/VolumeAllocation.c +++ b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c @@ -1353,13 +1353,20 @@ static OSErr BlockFindContiguous( UInt32 blockRef; UInt32 wordsPerBlock; - if (!useMetaZone) { - struct hfsmount *hfsmp = VCBTOHFS(vcb); - - - if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) && - (startingBlock <= hfsmp->hfs_metazone_end)) - startingBlock = hfsmp->hfs_metazone_end + 1; + /* + * When we're skipping the metadata zone and the start/end + * range overlaps with the metadata zone then adjust the + * start to be outside of the metadata zone. If the range + * is entirely inside the metadata zone then we can deny the + * request (dskFulErr). 
+ */ + if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) { + if (startingBlock <= vcb->hfs_metazone_end) { + if (endingBlock > (vcb->hfs_metazone_end + 2)) + startingBlock = vcb->hfs_metazone_end + 1; + else + goto DiskFull; + } } if ((endingBlock - startingBlock) < minBlocks) diff --git a/bsd/kern/kern_proc.c b/bsd/kern/kern_proc.c index db4489ae0..96ea755f3 100644 --- a/bsd/kern/kern_proc.c +++ b/bsd/kern/kern_proc.c @@ -594,17 +594,14 @@ pgdelete(pgrp) register struct pgrp *pgrp; { struct tty * ttyp; - int removettypgrp = 0; ttyp = pgrp->pg_session->s_ttyp; - if (pgrp->pg_session->s_ttyp != NULL && - pgrp->pg_session->s_ttyp->t_pgrp == pgrp) { + if (ttyp != NULL && pgrp->pg_session->s_ttyp->t_pgrp == pgrp) { pgrp->pg_session->s_ttyp->t_pgrp = NULL; - removettypgrp = 1; } LIST_REMOVE(pgrp, pg_hash); if (--pgrp->pg_session->s_count == 0) { - if (removettypgrp && (ttyp == &cons) && (ttyp->t_session == pgrp->pg_session)) + if (ttyp != NULL && (ttyp->t_session == pgrp->pg_session)) ttyp->t_session = 0; FREE_ZONE(pgrp->pg_session, sizeof(struct session), M_SESSION); } diff --git a/bsd/kern/kern_subr.c b/bsd/kern/kern_subr.c index c9f4723dc..ab64b242c 100644 --- a/bsd/kern/kern_subr.c +++ b/bsd/kern/kern_subr.c @@ -1055,7 +1055,7 @@ __private_extern__ void uio_calculateresid( uio_t a_uio ) return; } - a_uio->uio_iovcnt = 0; + a_uio->uio_iovcnt = a_uio->uio_max_iovs; if (UIO_IS_64_BIT_SPACE(a_uio)) { #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI a_uio->uio_resid = 0; @@ -1064,7 +1064,6 @@ __private_extern__ void uio_calculateresid( uio_t a_uio ) #endif for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) { - a_uio->uio_iovcnt++; #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len; #else @@ -1072,16 +1071,32 @@ __private_extern__ void uio_calculateresid( uio_t a_uio ) #endif } } + + /* position to first non zero length iovec (4235922) */ + while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) { + a_uio->uio_iovcnt--; + if (a_uio->uio_iovcnt > 0) { + a_uio->uio_iovs.uiovp++; + } + } } else { a_uio->uio_resid = 0; for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) { - a_uio->uio_iovcnt++; a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len; } } + + /* position to first non zero length iovec (4235922) */ + while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) { + a_uio->uio_iovcnt--; + if (a_uio->uio_iovcnt > 0) { + a_uio->uio_iovs.kiovp++; + } + } } + return; } diff --git a/bsd/kern/tty_pty.c b/bsd/kern/tty_pty.c index 5ec154c67..b396f4b09 100644 --- a/bsd/kern/tty_pty.c +++ b/bsd/kern/tty_pty.c @@ -480,6 +480,7 @@ ptcopen(dev_t dev, __unused int flag, __unused int devtype, __unused proc_t p) goto out; } tp->t_oproc = ptsstart; + CLR(tp->t_state, TS_ZOMBIE); #ifdef sun4c tp->t_stop = ptsstop; #endif diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c index 30750607f..a9287124f 100644 --- a/bsd/kern/uipc_socket2.c +++ b/bsd/kern/uipc_socket2.c @@ -322,6 +322,7 @@ sonewconn_internal(head, connstatus) } head->so_qlen++; #ifdef __APPLE__ + so->so_rcv.sb_flags |= SB_RECV; /* XXX */ so->so_rcv.sb_so = so->so_snd.sb_so = so; TAILQ_INIT(&so->so_evlist); diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c index 259e5dd54..44f89c3cf 100644 --- a/bsd/netinet/ip_output.c +++ 
b/bsd/netinet/ip_output.c @@ -1383,8 +1383,8 @@ in_delayed_cksum_offset(struct mbuf *m, int ip_offset) } if (ip_offset + sizeof(struct ip) > m->m_len) { - printf("delayed m_pullup, m->len: %d off: %d p: %d\n", - m->m_len, ip_offset, ip->ip_p); + printf("delayed m_pullup, m->len: %d off: %d\n", + m->m_len, ip_offset); /* * XXX * this shouldn't happen diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c index 822dabe19..624b3451f 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -144,7 +144,7 @@ static int blackhole = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, &blackhole, 0, "Do not send RST when dropping refused connections"); -int tcp_delack_enabled = 1; +int tcp_delack_enabled = 3; SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, &tcp_delack_enabled, 0, "Delay ACK to try and piggyback it onto a data packet"); @@ -236,13 +236,17 @@ extern int fw_verbose; * - the peer hasn't sent us a TH_PUSH data packet, if he did, take this as a clue that we * need to ACK with no delay. This helps higher level protocols who won't send * us more data even if the window is open because their last "segment" hasn't been ACKed - * + * - delayed acks are enabled (set to 3, "streaming detection") and + * - if we receive more than 4 full packets per second on this socket, we're streaming acts as "1". + * - if we don't meet that criteria, acts like "2". Allowing faster acking while browsing for example. * */ #define DELAY_ACK(tp) \ (((tcp_delack_enabled == 1) && ((tp->t_flags & TF_RXWIN0SENT) == 0)) || \ (((tcp_delack_enabled == 2) && (tp->t_flags & TF_RXWIN0SENT) == 0) && \ - ((thflags & TH_PUSH) == 0) && ((tp->t_flags & TF_DELACK) == 0))) + ((thflags & TH_PUSH) == 0) && ((tp->t_flags & TF_DELACK) == 0)) || \ + (((tcp_delack_enabled == 3) && (tp->t_flags & TF_RXWIN0SENT) == 0) && \ + (((tp->t_rcvtime == 0) && (tp->rcv_byps > (4* tp->t_maxseg))) || (((thflags & TH_PUSH) == 0) && ((tp->t_flags & TF_DELACK) == 0))))) static int tcpdropdropablreq(struct socket *head); @@ -1166,17 +1170,22 @@ findpcb: * example interactive connections with many small packets like * telnet or SSH. * - * Setting either tcp_minmssoverload or tcp_minmss to "0" disables - * this check. * * Account for packet if payload packet, skip over ACK, etc. + * + * The packet per second count is done all the time and is also used + * by "DELAY_ACK" to detect streaming situations. + * */ - if (tcp_minmss && tcp_minmssoverload && - tp->t_state == TCPS_ESTABLISHED && tlen > 0) { + if (tp->t_state == TCPS_ESTABLISHED && tlen > 0) { if (tp->rcv_reset > tcp_now) { tp->rcv_pps++; tp->rcv_byps += tlen + off; - if (tp->rcv_pps > tcp_minmssoverload) { + /* + * Setting either tcp_minmssoverload or tcp_minmss to "0" disables + * the check. 
+ */ + if (tcp_minmss && tcp_minmssoverload && tp->rcv_pps > tcp_minmssoverload) { if ((tp->rcv_byps / tp->rcv_pps) < tcp_minmss) { char ipstrbuf[MAX_IPv6_STR_LEN]; printf("too many small tcp packets from " diff --git a/bsd/nfs/nfs.h b/bsd/nfs/nfs.h index 673240a29..9da9e3b8c 100644 --- a/bsd/nfs/nfs.h +++ b/bsd/nfs/nfs.h @@ -747,6 +747,7 @@ struct nfssvc_sock { int ns_reclen; int ns_numuids; u_long ns_sref; + time_t ns_timestamp; /* socket timestamp */ lck_mtx_t ns_wgmutex; /* mutex for write gather fields */ u_quad_t ns_wgtime; /* next Write deadline (usec) */ LIST_HEAD(, nfsrv_descript) ns_tq; /* Write gather lists */ @@ -764,7 +765,7 @@ struct nfssvc_sock { #define SLP_LASTFRAG 0x20 /* on last fragment of RPC record */ #define SLP_ALLFLAGS 0xff -extern TAILQ_HEAD(nfssvc_sockhead, nfssvc_sock) nfssvc_sockhead; +extern TAILQ_HEAD(nfssvc_sockhead, nfssvc_sock) nfssvc_sockhead, nfssvc_deadsockhead; /* locks for nfssvc_sock's */ extern lck_grp_attr_t *nfs_slp_group_attr; @@ -1021,6 +1022,7 @@ int nfsrv_setattr(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, proc_t procp, mbuf_t *mrq); void nfsrv_slpderef(struct nfssvc_sock *slp); +void nfsrv_slpfree(struct nfssvc_sock *slp); int nfsrv_statfs(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, proc_t procp, mbuf_t *mrq); diff --git a/bsd/nfs/nfs_serv.c b/bsd/nfs/nfs_serv.c index f6fc25fd4..d74605ded 100644 --- a/bsd/nfs/nfs_serv.c +++ b/bsd/nfs/nfs_serv.c @@ -213,10 +213,7 @@ nfsrv3_access(nfsd, slp, procp, mrq) KAUTH_VNODE_DELETE_CHILD; } else { testaction = - KAUTH_VNODE_WRITE_DATA | - KAUTH_VNODE_WRITE_ATTRIBUTES | - KAUTH_VNODE_WRITE_EXTATTRIBUTES | - KAUTH_VNODE_WRITE_SECURITY; + KAUTH_VNODE_WRITE_DATA; } if (nfsrv_authorize(vp, NULL, testaction, &context, nxo, 0)) nfsmode &= ~NFSV3ACCESS_MODIFY; @@ -780,7 +777,7 @@ nfsrv_read(nfsd, slp, procp, mrq) int i; caddr_t bpos; int error = 0, count, len, left, siz, tlen, getret; - int v3 = (nfsd->nd_flag & ND_NFSV3), reqlen; + int v3 = (nfsd->nd_flag & ND_NFSV3), reqlen, maxlen; char *cp2; mbuf_t mb, mb2, mreq; mbuf_t m2; @@ -803,7 +800,12 @@ nfsrv_read(nfsd, slp, procp, mrq) nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); off = (off_t)fxdr_unsigned(u_long, *tl); } - nfsm_srvstrsiz(reqlen, NFS_SRVMAXDATA(nfsd)); + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + reqlen = fxdr_unsigned(u_long, *tl); + maxlen = NFS_SRVMAXDATA(nfsd); + if (reqlen > maxlen) + reqlen = maxlen; + if ((error = nfsrv_fhtovp(&nfh, nam, TRUE, &vp, &nx, &nxo))) { nfsm_reply(2 * NFSX_UNSIGNED); nfsm_srvpostop_attr(1, NULL); @@ -1851,6 +1853,7 @@ nfsrv_create(nfsd, slp, procp, mrq) if (!error) { if (nd.ni_cnd.cn_flags & ISSYMLINK) error = EINVAL; + vp = nd.ni_vp; } if (error) nfsm_reply(0); diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c index 3f36830fa..2da38e94c 100644 --- a/bsd/nfs/nfs_socket.c +++ b/bsd/nfs/nfs_socket.c @@ -1891,6 +1891,12 @@ nfs_timer(__unused void *arg) if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) nfsrv_wakenfsd(slp); } + while ((slp = TAILQ_FIRST(&nfssvc_deadsockhead))) { + if ((slp->ns_timestamp + 5) > now.tv_sec) + break; + TAILQ_REMOVE(&nfssvc_deadsockhead, slp, ns_chain); + nfsrv_slpfree(slp); + } lck_mtx_unlock(nfsd_mutex); #endif /* NFS_NOSERVER */ diff --git a/bsd/nfs/nfs_subs.c b/bsd/nfs/nfs_subs.c index 556db3712..cfc3be25e 100644 --- a/bsd/nfs/nfs_subs.c +++ b/bsd/nfs/nfs_subs.c @@ -156,7 +156,7 @@ lck_grp_t *nfs_slp_rwlock_group; lck_grp_t *nfs_slp_mutex_group; struct nfs_reqq nfs_reqq; -struct nfssvc_sockhead nfssvc_sockhead; +struct nfssvc_sockhead nfssvc_sockhead, 
nfssvc_deadsockhead; struct nfsd_head nfsd_head; int nfsd_head_flag; @@ -1456,7 +1456,7 @@ nfs_loadattrcache( (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4)); if (nvap->nva_type == VREG) { - int orig_size = np->n_size; + u_quad_t orig_size = np->n_size; if (np->n_flag & NMODIFIED) { if (nvap->nva_size < np->n_size) nvap->nva_size = np->n_size; diff --git a/bsd/nfs/nfs_syscalls.c b/bsd/nfs/nfs_syscalls.c index bf82ef6bc..8b685035b 100644 --- a/bsd/nfs/nfs_syscalls.c +++ b/bsd/nfs/nfs_syscalls.c @@ -912,12 +912,12 @@ nfssvc_nfsd(nsd, argp, p) continue; lck_rw_lock_exclusive(&slp->ns_rwlock); if (slp->ns_flag & SLP_VALID) { - if (slp->ns_flag & SLP_DISCONN) { - nfsrv_zapsock(slp); - } else if (slp->ns_flag & SLP_NEEDQ) { + if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) { slp->ns_flag &= ~SLP_NEEDQ; nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK); } + if (slp->ns_flag & SLP_DISCONN) + nfsrv_zapsock(slp); error = nfsrv_dorec(slp, nfsd, &nd); microuptime(&now); cur_usec = (u_quad_t)now.tv_sec * 1000000 + @@ -1389,10 +1389,15 @@ nfsrv_zapsock(struct nfssvc_sock *slp) if (so == NULL) return; + /* + * Attempt to deter future upcalls, but leave the + * upcall info in place to avoid a race with the + * networking code. + */ socket_lock(so, 1); - so->so_upcall = NULL; so->so_rcv.sb_flags &= ~SB_UPCALL; socket_unlock(so, 1); + sock_shutdown(so, SHUT_RDWR); } @@ -1622,7 +1627,7 @@ nfsmout: /* * cleanup and release a server socket structure. */ -static void +void nfsrv_slpfree(struct nfssvc_sock *slp) { struct nfsuid *nuidp, *nnuidp; @@ -1673,6 +1678,8 @@ nfsrv_slpfree(struct nfssvc_sock *slp) void nfsrv_slpderef(struct nfssvc_sock *slp) { + struct timeval now; + lck_mtx_lock(nfsd_mutex); lck_rw_lock_exclusive(&slp->ns_rwlock); slp->ns_sref--; @@ -1682,10 +1689,13 @@ nfsrv_slpderef(struct nfssvc_sock *slp) return; } + /* queue the socket up for deletion */ + microuptime(&now); + slp->ns_timestamp = now.tv_sec; TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); + TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain); + lck_rw_done(&slp->ns_rwlock); lck_mtx_unlock(nfsd_mutex); - - nfsrv_slpfree(slp); } @@ -1699,8 +1709,10 @@ nfsrv_init(terminating) int terminating; { struct nfssvc_sock *slp, *nslp; + struct timeval now; if (terminating) { + microuptime(&now); for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != 0; slp = nslp) { nslp = TAILQ_NEXT(slp, ns_chain); if (slp->ns_flag & SLP_VALID) { @@ -1708,10 +1720,10 @@ nfsrv_init(terminating) nfsrv_zapsock(slp); lck_rw_done(&slp->ns_rwlock); } + /* queue the socket up for deletion */ + slp->ns_timestamp = now.tv_sec; TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); - /* grab the lock one final time in case anyone's using it */ - lck_rw_lock_exclusive(&slp->ns_rwlock); - nfsrv_slpfree(slp); + TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain); } nfsrv_cleancache(); /* And clear out server cache */ /* XXX Revisit when enabling WebNFS */ @@ -1723,6 +1735,7 @@ nfsrv_init(terminating) #endif TAILQ_INIT(&nfssvc_sockhead); + TAILQ_INIT(&nfssvc_deadsockhead); TAILQ_INIT(&nfsd_head); nfsd_head_flag &= ~NFSD_CHECKSLP; diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index 8cb282de6..fe7fe5269 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -655,11 +655,33 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) { if (cnp->cn_nameiop != LOOKUP) break; - if (cnp->cn_flags & (LOCKPARENT | NOCACHE | ISDOTDOT)) + if (cnp->cn_flags & (LOCKPARENT | NOCACHE)) break; 
+ if (cnp->cn_flags & ISDOTDOT) { + /* + * Quit here only if we can't use + * the parent directory pointer or + * don't have one. Otherwise, we'll + * use it below. + */ + if ((dp->v_flag & VROOT) || + dp->v_parent == NULLVP) + break; + } + } + + /* + * "." and ".." aren't supposed to be cached, so check + * for them before checking the cache. + */ + if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') + vp = dp; + else if (cnp->cn_flags & ISDOTDOT) + vp = dp->v_parent; + else { + if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) + break; } - if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) - break; if ( (cnp->cn_flags & ISLASTCN) ) break; diff --git a/bsd/vfs/vfs_journal.c b/bsd/vfs/vfs_journal.c index 4389d214b..c1308807f 100644 --- a/bsd/vfs/vfs_journal.c +++ b/bsd/vfs/vfs_journal.c @@ -1507,6 +1507,110 @@ journal_open(struct vnode *jvp, return NULL; } + +int +journal_is_clean(struct vnode *jvp, + off_t offset, + off_t journal_size, + struct vnode *fsvp, + size_t min_fs_block_size) +{ + journal jnl; + int phys_blksz, ret; + int orig_checksum, checksum; + struct vfs_context context; + + context.vc_proc = current_proc(); + context.vc_ucred = FSCRED; + + /* Get the real physical block size. */ + if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) { + printf("jnl: is_clean: failed to get device block size.\n"); + return EINVAL; + } + + if (phys_blksz > min_fs_block_size) { + printf("jnl: is_clean: error: phys blksize %d bigger than min fs blksize %d\n", + phys_blksz, min_fs_block_size); + return EINVAL; + } + + if ((journal_size % phys_blksz) != 0) { + printf("jnl: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n", + journal_size, phys_blksz); + return EINVAL; + } + + memset(&jnl, 0, sizeof(jnl)); + + if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) { + printf("jnl: is_clean: could not allocate space for header buffer (%d bytes)\n", phys_blksz); + return ENOMEM; + } + + jnl.jhdr = (journal_header *)jnl.header_buf; + memset(jnl.jhdr, 0, sizeof(journal_header)+4); + + jnl.jdev = jvp; + jnl.jdev_offset = offset; + jnl.fsdev = fsvp; + + // we have to set this up here so that do_journal_io() will work + jnl.jhdr->jhdr_size = phys_blksz; + + if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != phys_blksz) { + printf("jnl: is_clean: could not read %d bytes for the journal header.\n", + phys_blksz); + ret = EINVAL; + goto get_out; + } + + orig_checksum = jnl.jhdr->checksum; + jnl.jhdr->checksum = 0; + + if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) { + // do this before the swap since it's done byte-at-a-time + orig_checksum = SWAP32(orig_checksum); + checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header)); + swap_journal_header(&jnl); + jnl.flags |= JOURNAL_NEED_SWAP; + } else { + checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header)); + } + + if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) { + printf("jnl: is_clean: journal magic is bad (0x%x != 0x%x)\n", + jnl.jhdr->magic, JOURNAL_HEADER_MAGIC); + ret = EINVAL; + goto get_out; + } + + if (orig_checksum != checksum) { + printf("jnl: is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum); + ret = EINVAL; + goto get_out; + } + + // + // if the start and end are equal then the journal is clean. + // otherwise it's not clean and therefore an error. 
+ // + if (jnl.jhdr->start == jnl.jhdr->end) { + ret = 0; + } else { + ret = EINVAL; + } + + get_out: + kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz); + + return ret; + + +} + + + void journal_close(journal *jnl) { diff --git a/bsd/vfs/vfs_journal.h b/bsd/vfs/vfs_journal.h index cf87d421e..b03209b9a 100644 --- a/bsd/vfs/vfs_journal.h +++ b/bsd/vfs/vfs_journal.h @@ -202,6 +202,19 @@ journal *journal_open(struct vnode *jvp, void (*flush)(void *arg), void *arg); +/* + * Test whether the journal is clean or not. This is intended + * to be used when you're mounting read-only. If the journal + * is not clean for some reason then you should not mount the + * volume as your data structures may be in an unknown state. + */ +int journal_is_clean(struct vnode *jvp, + off_t offset, + off_t journal_size, + struct vnode *fsvp, + size_t min_fs_block_size); + + /* * Call journal_close() just before your file system is unmounted. * It flushes any outstanding transactions and makes sure the diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c index 5675ce21f..e07690f8d 100644 --- a/bsd/vfs/vfs_syscalls.c +++ b/bsd/vfs/vfs_syscalls.c @@ -190,6 +190,7 @@ mount(struct proc *p, register struct mount_args *uap, __unused register_t *retv int mntalloc = 0; mode_t accessmode; boolean_t is_64bit; + boolean_t is_rwlock_locked = FALSE; AUDIT_ARG(fflags, uap->flags); @@ -227,13 +228,13 @@ mount(struct proc *p, register struct mount_args *uap, __unused register_t *retv } mount_unlock(mp); lck_rw_lock_exclusive(&mp->mnt_rwlock); + is_rwlock_locked = TRUE; /* * We only allow the filesystem to be reloaded if it * is currently mounted read-only. */ if ((uap->flags & MNT_RELOAD) && ((mp->mnt_flag & MNT_RDONLY) == 0)) { - lck_rw_done(&mp->mnt_rwlock); error = ENOTSUP; goto out1; } @@ -243,7 +244,6 @@ mount(struct proc *p, register struct mount_args *uap, __unused register_t *retv */ if (mp->mnt_vfsstat.f_owner != kauth_cred_getuid(context.vc_ucred) && (error = suser(context.vc_ucred, &p->p_acflag))) { - lck_rw_done(&mp->mnt_rwlock); goto out1; } /* @@ -333,6 +333,7 @@ mount(struct proc *p, register struct mount_args *uap, __unused register_t *retv TAILQ_INIT(&mp->mnt_newvnodes); mount_lock_init(mp); lck_rw_lock_exclusive(&mp->mnt_rwlock); + is_rwlock_locked = TRUE; mp->mnt_op = vfsp->vfc_vfsops; mp->mnt_vtable = vfsp; mount_list_lock(); @@ -471,6 +472,7 @@ update: mp->mnt_flag = flag; vfs_event_signal(NULL, VQ_UPDATE, (intptr_t)NULL); lck_rw_done(&mp->mnt_rwlock); + is_rwlock_locked = FALSE; if (!error) enablequotas(mp,&context); goto out2; @@ -490,6 +492,7 @@ update: vfs_event_signal(NULL, VQ_MOUNT, (intptr_t)NULL); checkdirs(vp, &context); lck_rw_done(&mp->mnt_rwlock); + is_rwlock_locked = FALSE; mount_list_add(mp); /* * there is no cleanup code here so I have made it void @@ -523,6 +526,7 @@ update: vnode_rele(device_vnode); } lck_rw_done(&mp->mnt_rwlock); + is_rwlock_locked = FALSE; mount_lock_destroy(mp); FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); } @@ -544,6 +548,10 @@ out2: if (devpath && devvp) vnode_put(devvp); out1: + /* Release mnt_rwlock only when it was taken */ + if (is_rwlock_locked == TRUE) { + lck_rw_done(&mp->mnt_rwlock); + } if (mntalloc) FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); vnode_put(vp); diff --git a/bsd/vfs/vfs_xattr.c b/bsd/vfs/vfs_xattr.c index 7ecca5261..9653ba143 100644 --- a/bsd/vfs/vfs_xattr.c +++ b/bsd/vfs/vfs_xattr.c @@ -1331,8 +1331,9 @@ lookup: if (fileflags & O_CREAT) { nd.ni_cnd.cn_nameiop = CREATE; - nd.ni_cnd.cn_flags |= LOCKPARENT; - 
+	if (dvp != vp) {
+		nd.ni_cnd.cn_flags |= LOCKPARENT;
+	}
 	if ( (error = namei(&nd))) {
 		nd.ni_dvp = NULLVP;
 		error = ENOATTR;
 		goto out;
 	}
@@ -1378,8 +1379,9 @@ lookup:
 		xvp = nd.ni_vp;
 	}
 	nameidone(&nd);
-	vnode_put(dvp);  /* drop iocount from LOCKPARENT request above */
-
+	if (dvp != vp) {
+		vnode_put(dvp);  /* drop iocount from LOCKPARENT request above */
+	}
 	if (error)
 		goto out;
 } else {
diff --git a/config/MasterVersion b/config/MasterVersion
index 3dd01efa7..bb78832f5 100644
--- a/config/MasterVersion
+++ b/config/MasterVersion
@@ -1,4 +1,4 @@
-8.5.0
+8.6.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
diff --git a/libkern/libkern/OSCrossEndian.h b/libkern/libkern/OSCrossEndian.h
index 0131455d1..2d04f44d6 100644
--- a/libkern/libkern/OSCrossEndian.h
+++ b/libkern/libkern/OSCrossEndian.h
@@ -56,14 +56,8 @@
 static __inline__ int
 _OSRosettaCheck(void)
 {
-	int isCrossEndian;
+	int isCrossEndian = 0;
 
-	__asm__ ( "b 0f\n"
-		"	.long 0x14400004\n"
-		"	li %0,1\n"
-		"0:"
-		: "=r" (isCrossEndian) : "0" (0)
-	);
 	return isCrossEndian;
 }
-- 
2.45.2
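
The shared-to-exclusive upgrade fallback in this patch appears at three call sites (twice in hfs_vnop_readdirattr, once in hfs_vgetrsrc). Below is a minimal standalone sketch of the pattern; the rwlock type and the rw_try_upgrade/rw_lock_exclusive helpers are stand-ins for the xnu lck_rw_* primitives, not the real API.

#include <stdio.h>

struct rwlock_stub { int held_exclusive; };

/*
 * Stand-ins for lck_rw_lock_shared_to_exclusive() and
 * lck_rw_lock_exclusive().  The property the patched code relies on is
 * that a failed upgrade leaves the caller holding no lock at all.
 */
static int rw_try_upgrade(struct rwlock_stub *l)
{
	(void)l;
	return 1;	/* nonzero: pretend we lost the upgrade race */
}

static void rw_lock_exclusive(struct rwlock_stub *l)
{
	l->held_exclusive = 1;
}

static void take_exclusive(struct rwlock_stub *l)
{
	/*
	 * Mirrors the patched call sites: if the upgrade fails we have
	 * already lost the shared hold and must take the exclusive lock
	 * from scratch before touching the protected cnode fields.
	 */
	if (rw_try_upgrade(l) != 0)
		rw_lock_exclusive(l);
}

int main(void)
{
	struct rwlock_stub l = { 0 };

	take_exclusive(&l);
	printf("held exclusive: %d\n", l.held_exclusive);
	return 0;
}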
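
Sketch of the raw hardlink fix-up added to hfs_vget(): entries in the private metadata directory whose names begin with the HFS inode prefix have their link reference parsed out of the name. HFS_INODE_PREFIX ("iNode", as defined in hfs.h) and HFS_INODE_PREFIX_LEN match the patch; the rest is standalone scaffolding.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HFS_INODE_PREFIX	"iNode"
#define HFS_INODE_PREFIX_LEN	5

/* Returns the link reference number, or 0 if the name is not a raw inode. */
static unsigned long linkref_from_name(const char *name)
{
	if (strncmp(name, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) != 0)
		return 0;
	return strtoul(name + HFS_INODE_PREFIX_LEN, NULL, 10);
}

int main(void)
{
	printf("%lu\n", linkref_from_name("iNode12345"));	/* 12345 */
	printf("%lu\n", linkref_from_name("README"));		/* 0 */
	return 0;
}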
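
After the magic and checksum checks, the new journal_is_clean() reduces to a single test: the journal is clean iff the header's start and end offsets are equal. A cut-down model of that test follows; the header struct and magic constant are simplified stand-ins for the on-disk journal_header in vfs_journal.h.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for the on-disk journal header; the real structure
 * in vfs_journal.h has more fields (checksum, blhdr_size, jhdr_size, ...). */
struct jhdr_stub {
	uint32_t	magic;
	uint64_t	start;	/* offset of the oldest uncommitted transaction */
	uint64_t	end;	/* offset where the next transaction would go */
};

#define JHDR_MAGIC	0x4a4e4c78	/* 'JNLx', standing in for JOURNAL_HEADER_MAGIC */

/* Returns 0 if the journal is clean, EINVAL otherwise, mirroring journal_is_clean(). */
static int journal_header_is_clean(const struct jhdr_stub *jhdr)
{
	if (jhdr->magic != JHDR_MAGIC)
		return EINVAL;
	return (jhdr->start == jhdr->end) ? 0 : EINVAL;
}

int main(void)
{
	struct jhdr_stub clean = { JHDR_MAGIC, 4096, 4096 };
	struct jhdr_stub dirty = { JHDR_MAGIC, 4096, 8192 };

	printf("clean: %d  dirty: %d\n",
	    journal_header_is_clean(&clean), journal_header_is_clean(&dirty));
	return 0;
}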
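
Model of the uio_calculateresid() change: the iovec count now starts at uio_max_iovs, resid is summed over all non-empty iovecs, and the cursor is advanced past any leading zero-length entries (the 4235922 case noted in the diff). The uio structure below is a simplified stand-in for the kernel's.

#include <stddef.h>
#include <stdio.h>

struct kiov_stub { void *iov_base; size_t iov_len; };

struct uio_stub {
	struct kiov_stub	*iovs;		/* current iovec cursor */
	int			 iovcnt;
	size_t			 resid;
};

static void calculate_resid(struct uio_stub *uio, int max_iovs)
{
	int i;

	uio->iovcnt = max_iovs;
	uio->resid = 0;
	for (i = 0; i < max_iovs; i++) {
		if (uio->iovs[i].iov_len != 0 && uio->iovs[i].iov_base != NULL)
			uio->resid += uio->iovs[i].iov_len;
	}

	/* position to the first non zero length iovec (the 4235922 fix) */
	while (uio->iovcnt > 0 && uio->iovs->iov_len == 0) {
		uio->iovcnt--;
		if (uio->iovcnt > 0)
			uio->iovs++;
	}
}

int main(void)
{
	char buf[8];
	struct kiov_stub v[3] = { { NULL, 0 }, { buf, sizeof(buf) }, { NULL, 0 } };
	struct uio_stub uio = { v, 0, 0 };

	calculate_resid(&uio, 3);
	printf("iovcnt=%d resid=%zu\n", uio.iovcnt, uio.resid);	/* iovcnt=2 resid=8 */
	return 0;
}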
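
A readable restatement of the patched DELAY_ACK policy, including the new tcp_delack_enabled == 3 streaming-detection mode. The tcpcb field names come from the diff; the struct and flag values below are stubs so the sketch compiles on its own.

#include <stdbool.h>
#include <stdio.h>

/* Stub flag values; the real ones live in tcp_var.h / tcp.h. */
#define TF_RXWIN0SENT	0x01
#define TF_DELACK	0x02
#define TH_PUSH		0x08

struct tcpcb_stub {
	int		t_flags;
	unsigned	t_rcvtime;	/* ticks since a segment was last received */
	unsigned	rcv_byps;	/* bytes received in the current one-second window */
	unsigned	t_maxseg;
};

static bool delay_ack(int tcp_delack_enabled, const struct tcpcb_stub *tp, int thflags)
{
	if (tp->t_flags & TF_RXWIN0SENT)	/* never delay after advertising a zero window */
		return false;

	switch (tcp_delack_enabled) {
	case 1:		/* always delay */
		return true;
	case 2:		/* delay unless the peer pushed or an ACK is already pending */
		return (thflags & TH_PUSH) == 0 && (tp->t_flags & TF_DELACK) == 0;
	case 3:		/* streaming detection: more than 4 * MSS in the last second
			   acts like mode 1, otherwise fall back to mode-2 behaviour */
		if (tp->t_rcvtime == 0 && tp->rcv_byps > 4 * tp->t_maxseg)
			return true;
		return (thflags & TH_PUSH) == 0 && (tp->t_flags & TF_DELACK) == 0;
	default:	/* 0: never delay */
		return false;
	}
}

int main(void)
{
	struct tcpcb_stub tp = { 0, 0, 8 * 1460, 1460 };

	/* streaming socket: the ACK is delayed even though the peer set PUSH */
	printf("%d\n", delay_ack(3, &tp, TH_PUSH));
	return 0;
}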
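
Sketch of the deferred-free scheme the NFS server changes introduce: nfsrv_slpderef() now timestamps the socket and parks it on nfssvc_deadsockhead, and nfs_timer() frees entries that have been dead for more than five seconds. The singly linked list and types here are simplified stand-ins (the kernel uses a TAILQ in FIFO order and calls nfsrv_slpfree()).

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct deadsock_stub {
	time_t			 ns_timestamp;
	struct deadsock_stub	*next;
};

static struct deadsock_stub *deadlist;	/* stand-in for nfssvc_deadsockhead */

/* What nfsrv_slpderef() now does once the reference count drops to zero. */
static void retire_sock(struct deadsock_stub *slp, time_t now)
{
	slp->ns_timestamp = now;
	slp->next = deadlist;
	deadlist = slp;
}

/* The sweep added to nfs_timer(): free sockets dead for more than 5 seconds. */
static void reap_dead_socks(time_t now)
{
	while (deadlist != NULL && deadlist->ns_timestamp + 5 <= now) {
		struct deadsock_stub *slp = deadlist;

		deadlist = slp->next;
		free(slp);		/* the real code calls nfsrv_slpfree() */
	}
}

int main(void)
{
	struct deadsock_stub *s = calloc(1, sizeof(*s));

	retire_sock(s, 100);
	reap_dead_socks(103);
	printf("after 3s: %s\n", deadlist ? "still queued" : "freed");
	reap_dead_socks(106);
	printf("after 6s: %s\n", deadlist ? "still queued" : "freed");
	return 0;
}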
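
Sketch of the cache_lookup_path() change: "." and ".." are resolved from the current directory vnode and its parent pointer before consulting the name cache, since those entries are never cached. The vnode type and parent field below are stand-ins for struct vnode / v_parent.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct vnode_stub {
	const char		*name;
	struct vnode_stub	*parent;	/* NULL when there is no known parent */
	int			 is_root;
};

/* Stand-in for cache_lookup_locked(); always misses in this sketch. */
static struct vnode_stub *
cache_lookup_stub(struct vnode_stub *dp, const char *nm)
{
	(void)dp;
	(void)nm;
	return NULL;
}

static struct vnode_stub *
lookup_component(struct vnode_stub *dp, const char *nm)
{
	if (strcmp(nm, ".") == 0)
		return dp;
	if (strcmp(nm, "..") == 0)
		return (dp->is_root || dp->parent == NULL) ? NULL : dp->parent;
	return cache_lookup_stub(dp, nm);
}

int main(void)
{
	struct vnode_stub root = { "/", NULL, 1 };
	struct vnode_stub usr  = { "usr", &root, 0 };

	printf("%s\n", lookup_component(&usr, ".")->name);	/* usr */
	printf("%s\n", lookup_component(&usr, "..")->name);	/* / */
	return 0;
}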