git.saurik.com Git - apple/xnu.git/commitdiff
xnu-2422.100.13.tar.gz  (tags: os-x-1093, v2422.100.13)
author     Apple <opensource@apple.com>
           Tue, 12 Aug 2014 23:40:27 +0000 (23:40 +0000)
committer  Apple <opensource@apple.com>
           Tue, 12 Aug 2014 23:40:27 +0000 (23:40 +0000)
36 files changed:
bsd/hfs/hfs.h
bsd/hfs/hfs_attrlist.c
bsd/hfs/hfs_cnode.c
bsd/hfs/hfs_cnode.h
bsd/hfs/hfs_format.h
bsd/hfs/hfs_fsctl.h
bsd/hfs/hfs_link.c
bsd/hfs/hfs_readwrite.c
bsd/hfs/hfs_search.c
bsd/hfs/hfs_vfsops.c
bsd/hfs/hfs_vfsutils.c
bsd/hfs/hfs_vnops.c
bsd/hfs/hfs_xattr.c
bsd/kern/kern_fork.c
bsd/kern/sysv_shm.c
bsd/kern/ubc_subr.c
bsd/sys/attr.h
bsd/sys/fsevents.h
bsd/sys/ubc_internal.h
bsd/sys/user.h
bsd/sys/vnode.h
bsd/vfs/vfs_attrlist.c
bsd/vfs/vfs_fsevents.c
config/MasterVersion
iokit/IOKit/pwr_mgt/RootDomain.h
iokit/Kernel/IOHibernateIO.cpp
iokit/Kernel/IOLib.cpp
iokit/Kernel/IOPMrootDomain.cpp
iokit/Kernel/IOPlatformExpert.cpp
iokit/Kernel/IOServicePM.cpp
osfmk/i386/pmap_x86_common.c
osfmk/vm/vm_compressor_pager.c
osfmk/vm/vm_map.c
osfmk/vm/vm_object.c
osfmk/vm/vm_protos.h
tools/tests/xnu_quick_test/shared_memory_tests.c

diff --git a/bsd/hfs/hfs.h b/bsd/hfs/hfs.h
index 584b7f5a4720304709be979342b6c696eebc1404..eb0ab4a165d3430a5b9faf6e738749cdec0fefed 100644 (file)
@@ -325,39 +325,33 @@ typedef struct hfsmount {
        u_long hfs_idhash; /* size of cnid/fileid hash table -1 */
        LIST_HEAD(idhashhead, cat_preflightid) *hfs_idhashtbl; /* base of ID hash */
 
-       /*
-        * About the sync counters:
-        * hfs_sync_scheduled  keeps track whether a timer was scheduled but we
-        *                     haven't started processing the callback (i.e. we
-        *                     haven't begun the flush).  This will be non-zero
-        *                     even if the callback has been invoked, before we
-        *                    start the flush.
-        * hfs_sync_incomplete keeps track of the number of callbacks that have
-        *                     not completed yet (including callbacks not yet
-        *                     invoked).  We cannot safely unmount until this
-        *                     drops to zero.
-        *
-        * In both cases, we use counters, not flags, so that we can avoid
-        * taking locks.
-        */
-       int32_t         hfs_sync_scheduled;
-       int32_t         hfs_sync_incomplete;
-       u_int64_t       hfs_last_sync_request_time;
-       u_int32_t       hfs_active_threads;
-       u_int64_t       hfs_max_pending_io;
-                                       
-       thread_call_t   hfs_syncer;           // removeable devices get sync'ed by this guy
+    // Records the oldest outstanding sync request
+    struct timeval     hfs_sync_req_oldest;
+
+    // Records whether a sync has been queued or is in progress
+       boolean_t               hfs_sync_incomplete;
 
+       thread_call_t   hfs_syncer;            // removeable devices get sync'ed by this guy
+
+    /* Records the syncer thread so that we can avoid the syncer
+       queueing more syncs. */
+    thread_t           hfs_syncer_thread;
+
+    // Not currently used except for debugging purposes
+       uint32_t        hfs_active_threads;
 } hfsmount_t;
 
 /*
- * HFS_META_DELAY is a duration (0.1 seconds, expressed in microseconds)
- * used for triggering the hfs_syncer() routine.  It is used in two ways:
- * as the delay between ending a transaction and firing hfs_syncer(), and
- * the delay in re-firing hfs_syncer() when it decides to back off (for
- * example, due to in-progress writes).
+ * HFS_META_DELAY is a duration (in microseconds) used for triggering the
+ * hfs_syncer() routine.  The syncer backs off while writes are in
+ * progress, but HFS_MAX_META_DELAY is the maximum time we will allow
+ * the sync to be delayed.
  */
-enum { HFS_META_DELAY = 100 * 1000ULL };
+enum {
+    HFS_META_DELAY     = 100  * 1000,  // 0.1 secs
+    HFS_MAX_META_DELAY = 5000 * 1000   // 5 secs
+};
 
 typedef hfsmount_t  ExtendedVCB;
 
@@ -743,6 +737,8 @@ extern int hfs_owner_rights(struct hfsmount *hfsmp, uid_t cnode_uid, kauth_cred_
 
 extern int check_for_tracked_file(struct vnode *vp, time_t ctime, uint64_t op_type, void *arg);
 extern int check_for_dataless_file(struct vnode *vp, uint64_t op_type);
+extern int hfs_generate_document_id(struct hfsmount *hfsmp, uint32_t *docid);
+
 
 /*
  * Journal lock function prototypes
@@ -796,6 +792,11 @@ extern int  hfs_virtualmetafile(struct cnode *);
 extern int hfs_start_transaction(struct hfsmount *hfsmp);
 extern int hfs_end_transaction(struct hfsmount *hfsmp);
 extern int hfs_journal_flush(struct hfsmount *hfsmp, boolean_t wait_for_IO);
+extern void hfs_syncer_lock(struct hfsmount *hfsmp);
+extern void hfs_syncer_unlock(struct hfsmount *hfsmp);
+extern void hfs_syncer_wait(struct hfsmount *hfsmp);
+extern void hfs_syncer_wakeup(struct hfsmount *hfsmp);
+extern void hfs_syncer_queue(thread_call_t syncer);
 extern void hfs_sync_ejectable(struct hfsmount *hfsmp);
 
 extern void hfs_trim_callback(void *arg, uint32_t extent_count, const dk_extent_t *extents);
@@ -803,6 +804,8 @@ extern void hfs_trim_callback(void *arg, uint32_t extent_count, const dk_extent_
 /* Erase unused Catalog nodes due to <rdar://problem/6947811>. */
 extern int hfs_erase_unused_nodes(struct hfsmount *hfsmp);
 
+extern uint64_t hfs_usecs_to_deadline(uint64_t usecs);
+
 
 /*****************************************************************************
        Functions from hfs_vnops.c
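
For reference, HFS_META_DELAY and HFS_MAX_META_DELAY (defined above) drive the new syncer back-off policy: flush once the oldest outstanding sync request is HFS_MAX_META_DELAY old, otherwise only once the device has been idle for HFS_META_DELAY. Below is a minimal sketch restating the flush_now logic of the new hfs_syncer() in hfs_vfsops.c further down; hfs_has_elapsed() is the helper added there, while this wrapper function itself is not part of the commit.

    // Sketch only: the flush decision hfs_syncer() makes.  All timevals are
    // uptime stamps; 'pending' mirrors mnt_pending_write_size.
    static boolean_t should_flush_now(const struct timeval *oldest_req,
                                      const struct timeval *last_write_completed,
                                      const struct timeval *now,
                                      uint64_t pending)
    {
        // The oldest request has already waited as long as we allow.
        if (hfs_has_elapsed(oldest_req, now, HFS_MAX_META_DELAY))
            return TRUE;

        // Otherwise flush only if the device has been quiet for HFS_META_DELAY.
        if (pending == 0 &&
            hfs_has_elapsed(last_write_completed, now, HFS_META_DELAY))
            return TRUE;

        return FALSE;   // back off; hfs_syncer() re-queues itself instead
    }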
diff --git a/bsd/hfs/hfs_attrlist.c b/bsd/hfs/hfs_attrlist.c
index 0d230c19964fcc508b7eec01a7826e65ccbbde99..c57f811e245416269e043540e38fe96de1048037 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -686,12 +686,15 @@ packcommonattr(
                /* also don't expose the date_added or write_gen_counter fields */
                if (S_ISREG(cap->ca_mode) || S_ISLNK(cap->ca_mode)) {
                        struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
+                       extinfo->document_id = 0;
                        extinfo->date_added = 0;
                        extinfo->write_gen_counter = 0;
                }
                else if (S_ISDIR(cap->ca_mode)) {
                        struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
+                       extinfo->document_id = 0;
                        extinfo->date_added = 0;
+                       extinfo->write_gen_counter = 0;
                }
 
                attrbufptr = (char *)attrbufptr + sizeof(u_int8_t) * 32;
@@ -739,6 +742,7 @@ packcommonattr(
                *((u_int32_t *)attrbufptr) = cap->ca_flags;
                attrbufptr = ((u_int32_t *)attrbufptr) + 1;
        }
+
        if (ATTR_CMN_USERACCESS & attr) {
                u_int32_t user_access;
 
diff --git a/bsd/hfs/hfs_cnode.c b/bsd/hfs/hfs_cnode.c
index 439e6b27063690ade60397f95a5781bf63a27126..574afc762145f57bd99a689cf1bf0da1c94d7278 100644 (file)
@@ -1743,20 +1743,25 @@ uint32_t hfs_incr_gencount (struct cnode *cp) {
        return gcount;
 }
 
-/* Getter for the gen count */
-u_int32_t hfs_get_gencount (struct cnode *cp) {
+static u_int32_t
+hfs_get_gencount_internal(const uint8_t *finderinfo, mode_t mode)
+{
        u_int8_t *finfo = NULL;
        u_int32_t gcount = 0;
 
        /* overlay the FinderInfo to the correct pointer, and advance */
-       finfo = (u_int8_t*)cp->c_finderinfo;
+       finfo = (u_int8_t*)finderinfo;
        finfo = finfo + 16;
 
        /* 
         * FinderInfo is written out in big endian... make sure to convert it to host
         * native before we use it.
+        *
+        * NOTE: the write_gen_counter is stored in the same location in both the
+        *       FndrExtendedFileInfo and FndrExtendedDirInfo structs (it's the
+        *       last 32-bit word) so it is safe to have one code path here.
         */
-       if (S_ISREG(cp->c_attr.ca_mode)) {
+       if (S_ISDIR(mode) || S_ISREG(mode)) {
                struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
                gcount = OSSwapBigToHostInt32 (extinfo->write_gen_counter);
                
@@ -1768,14 +1773,30 @@ u_int32_t hfs_get_gencount (struct cnode *cp) {
                if (gcount == 0) {
                        gcount++;       
                }
-       }
-       else {
+       } else if (S_ISDIR(mode)) {
+               struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)finderinfo + 16);
+               gcount = OSSwapBigToHostInt32 (extinfo->write_gen_counter);
+
+               if (gcount == 0) {
+                       gcount++;       
+               }
+       } else {
                gcount = 0;
        }       
 
        return gcount;
 }
 
+/* Getter for the gen count */
+u_int32_t hfs_get_gencount (struct cnode *cp) {
+       return hfs_get_gencount_internal(cp->c_finderinfo, cp->c_attr.ca_mode);
+}
+
+/* Getter for the gen count from a buffer (currently pointer to finderinfo)*/
+u_int32_t hfs_get_gencount_from_blob (const uint8_t *finfoblob, mode_t mode) {
+       return hfs_get_gencount_internal(finfoblob, mode);
+}
+
 /*
  * Touch cnode times based on c_touch_xxx flags
  *
diff --git a/bsd/hfs/hfs_cnode.h b/bsd/hfs/hfs_cnode.h
index 824aa2938ec524dbe1cfc463b0aa241e369fd69c..9c45e79128a5e03e9c88ae39ad8a3c8cd46c45ba 100644 (file)
@@ -344,8 +344,13 @@ extern u_int32_t hfs_get_dateadded (struct cnode *cp);
 /* Gen counter methods */
 extern void hfs_write_gencount(struct cat_attr *cattrp, uint32_t gencount);
 extern uint32_t hfs_get_gencount(struct cnode *cp);
+extern uint32_t hfs_get_gencount_from_blob (const uint8_t *finfoblob, mode_t mode);
 extern uint32_t hfs_incr_gencount (struct cnode *cp);
 
+/* Document id methods */
+extern uint32_t hfs_get_document_id(struct cnode * /* cp */);
+extern uint32_t hfs_get_document_id_from_blob(const uint8_t * /* finderinfo */, mode_t /* mode */);
+
 /* Zero-fill file and push regions out to disk */
 extern int  hfs_filedone(struct vnode *vp, vfs_context_t context);
 
diff --git a/bsd/hfs/hfs_format.h b/bsd/hfs/hfs_format.h
index 52bcd0e203fcfe757799105956fc850cc93b3995..ba00a272a2f52e8b68bb541ba288e3666b9497a7 100644 (file)
@@ -225,15 +225,15 @@ struct FndrOpaqueInfo {
 typedef struct FndrOpaqueInfo FndrOpaqueInfo;
 
 struct FndrExtendedDirInfo {
-       u_int32_t point;
+       u_int32_t document_id;
        u_int32_t date_added;
        u_int16_t extended_flags;
        u_int16_t reserved3;
-       u_int32_t reserved4;
+       u_int32_t write_gen_counter;
 } __attribute__((aligned(2), packed));
 
 struct FndrExtendedFileInfo {
-       u_int32_t reserved1;
+       u_int32_t document_id;
        u_int32_t date_added;
        u_int16_t extended_flags;
        u_int16_t reserved2;
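
The renamed fields occupy the second 16 bytes of the 32-byte Finder info and are stored big-endian on disk (see the endianness note in hfs_get_gencount_internal() above). A small sketch, not part of this commit, of overlaying the struct on a raw on-disk Finder info blob:

    /* Sketch only: pull the new fields out of an on-disk 32-byte Finder info
     * blob, converting from big-endian to host byte order. */
    #include <libkern/OSByteOrder.h>

    static void read_extended_finfo(const u_int8_t finderinfo[32],
                                    u_int32_t *doc_id, u_int32_t *gen)
    {
        const struct FndrExtendedFileInfo *extinfo =
            (const struct FndrExtendedFileInfo *)(finderinfo + 16);

        *doc_id = OSSwapBigToHostInt32(extinfo->document_id);
        *gen    = OSSwapBigToHostInt32(extinfo->write_gen_counter);
    }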
diff --git a/bsd/hfs/hfs_fsctl.h b/bsd/hfs/hfs_fsctl.h
index ccd86e8d3f7777a182dbdd3b63524f47a2326a00..aad94ddabebb03f1062b9ccaad0939b709d41918 100644 (file)
@@ -148,6 +148,15 @@ struct hfs_journal_info {
 #define HFSIOC_GET_WRITE_GEN_COUNTER  _IOR('h', 30, u_int32_t)
 #define HFS_GET_WRITE_GEN_COUNTER  IOCBASECMD(HFSIOC_GET_WRITE_GEN_COUNTER)
 
+#define HFS_DOCUMENT_ID_ALLOCATE       0x1
+
+#define HFSIOC_GET_DOCUMENT_ID  _IOR('h', 31, u_int32_t)
+#define HFS_GET_DOCUMENT_ID  IOCBASECMD(HFSIOC_GET_DOCUMENT_ID)
+
+/* revisiond only uses this when something transforms in a way the kernel can't track, such as "foo.rtf" -> "foo.rtfd" */
+#define HFSIOC_TRANSFER_DOCUMENT_ID  _IOW('h', 32, u_int32_t)
+#define HFS_TRANSFER_DOCUMENT_ID  IOCBASECMD(HFSIOC_TRANSFER_DOCUMENT_ID)
+
 #endif /* __APPLE_API_UNSTABLE */
 
 #endif /* ! _HFS_FSCTL_H_ */
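
A hypothetical user-space sketch of the new document-ID fsctl (not part of this commit). It assumes the request is issued through fsctl(2) like the existing HFS fsctls and that this header is installed as <hfs/hfs_fsctl.h>; a result of 0 means the file is not UF_TRACKED and no ID has been allocated (allocation is requested via HFS_DOCUMENT_ID_ALLOCATE, not shown here).

    /* Hypothetical caller sketch (not part of this commit): read a file's
     * document ID.  Assumes fsctl(2) delivery like other HFS fsctls and an
     * installed header path of <hfs/hfs_fsctl.h>. */
    #include <sys/fsctl.h>
    #include <hfs/hfs_fsctl.h>
    #include <stdint.h>
    #include <stdio.h>

    static int print_document_id(const char *path)
    {
        uint32_t doc_id = 0;

        if (fsctl(path, HFSIOC_GET_DOCUMENT_ID, &doc_id, 0) != 0) {
            perror("fsctl(HFSIOC_GET_DOCUMENT_ID)");
            return -1;
        }

        /* 0 => the file is not UF_TRACKED and no ID has been allocated. */
        printf("%s: document id %u\n", path, doc_id);
        return 0;
    }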
diff --git a/bsd/hfs/hfs_link.c b/bsd/hfs/hfs_link.c
index 73afb95d3d3e9be5b7c6d4fe50ef7fdb5b6d5544..287fa62538210e327cb3b7322cbf22e5e8c8d09a 100644 (file)
@@ -610,6 +610,7 @@ hfs_vnop_link(struct vnop_link_args *ap)
                        }
                }
                tdcp->c_dirchangecnt++;
+               hfs_incr_gencount(tdcp);
                tdcp->c_touch_chgtime = TRUE;
                tdcp->c_touch_modtime = TRUE;
                tdcp->c_flag |= C_FORCEUPDATE;
@@ -758,6 +759,7 @@ hfs_unlink(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct c
                DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
        }
        dcp->c_dirchangecnt++;
+       hfs_incr_gencount(dcp);
        microtime(&tv);
        dcp->c_ctime = tv.tv_sec;
        dcp->c_mtime = tv.tv_sec;
@@ -1015,6 +1017,7 @@ hfs_privatedir_init(struct hfsmount * hfsmp, enum privdirtype type)
                dcp->c_entries++;
                INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
                dcp->c_dirchangecnt++;
+               hfs_incr_gencount(dcp);
                microtime(&tv);
                dcp->c_ctime = tv.tv_sec;
                dcp->c_mtime = tv.tv_sec;
diff --git a/bsd/hfs/hfs_readwrite.c b/bsd/hfs/hfs_readwrite.c
index a3f653fc445e3f0d40af60bd9ee49891d164a181..690f3046479a3cb2ffe59f7892a3261998478b94 100644 (file)
@@ -54,6 +54,7 @@
 #include <sys/sysctl.h>
 #include <sys/fsctl.h>
 #include <sys/mount_internal.h>
+#include <sys/file_internal.h>
 
 #include <miscfs/specfs/specdev.h>
 
@@ -1662,8 +1663,9 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* {
 
                cp = VTOC(vp);
 
-               if (vnode_isdir (vp)) {
-                       error = EISDIR;
+               if (!vnode_isdir(vp) && !(vnode_isreg(vp)) &&
+                               !(vnode_islnk(vp))) {
+                       error = EBADF;
                        *counter = 0;
                        return error;
                }
@@ -1671,12 +1673,12 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* {
                error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
                if (error == 0) {
                        struct ubc_info *uip;
-                       int is_mapped = 0;
+                       int is_mapped_writable = 0;
                        
                        if (UBCINFOEXISTS(vp)) {
                                uip = vp->v_ubcinfo;
-                               if (uip->ui_flags & UI_ISMAPPED) {
-                                       is_mapped = 1;
+                               if ((uip->ui_flags & UI_ISMAPPED) && (uip->ui_flags & UI_MAPPEDWRITE)) {
+                                       is_mapped_writable = 1;
                                }
                        }
 
@@ -1690,21 +1692,21 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* {
                                // (since the file may be unmapped but the pageouts have not
                                // yet happened).
                                //
-                               if (is_mapped) {
+                               if (is_mapped_writable) {
                                        hfs_incr_gencount (cp);
                                        gcount = hfs_get_gencount(cp);
                                }
-                               
-                               *counter = gcount;
 
-                       } 
-                       else {
+                               *counter = gcount;
+                       } else if (S_ISDIR(cp->c_attr.ca_mode)) {
+                               *counter = hfs_get_gencount(cp);
+                       } else {
                                /* not a file or dir? silently return */
                                *counter = 0;
                        }
                        hfs_unlock (cp);
 
-                       if (is_mapped) {
+                       if (is_mapped_writable) {
                                error = EBUSY;
                        }
                }
@@ -1712,6 +1714,235 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* {
                return error;
        }
 
+       case HFS_GET_DOCUMENT_ID:
+       {
+               struct cnode *cp = NULL;
+               int error=0;
+               u_int32_t *document_id = (u_int32_t *)ap->a_data;
+
+               cp = VTOC(vp);
+
+               if (cp->c_desc.cd_cnid == kHFSRootFolderID) {
+                       // the root-dir always has document id '2' (aka kHFSRootFolderID)
+                       *document_id = kHFSRootFolderID;
+
+               } else if ((S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode))) {
+                       int mark_it = 0;
+                       uint32_t tmp_doc_id;
+
+                       //
+                       // we can use the FndrExtendedFileInfo because the doc-id is the first
+                       // thing in both it and the FndrExtendedDirInfo struct, which is fixed
+                       // in format and cannot change layout
+                       //
+                       struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+
+                       hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
+
+                       //
+                       // if the cnode isn't UF_TRACKED and the doc-id-allocate flag isn't set
+                       // then just return a zero for the doc-id
+                       //
+                       if (!(cp->c_bsdflags & UF_TRACKED) && !(ap->a_fflag & HFS_DOCUMENT_ID_ALLOCATE)) {
+                               *document_id = 0;
+                               hfs_unlock(cp);
+                               return 0;
+                       }
+
+                       //
+                       // if the cnode isn't UF_TRACKED and the doc-id-allocate flag IS set,
+                       // then set mark_it so we know to set the UF_TRACKED flag once the
+                       // cnode is locked.
+                       //
+                       if (!(cp->c_bsdflags & UF_TRACKED) && (ap->a_fflag & HFS_DOCUMENT_ID_ALLOCATE)) {
+                               mark_it = 1;
+                       }
+                       
+                       tmp_doc_id = extinfo->document_id;   // get a copy of this
+                       
+                       hfs_unlock(cp);   // in case we have to call hfs_generate_document_id() 
+
+                       //
+                       // If the document_id isn't set, get a new one and then set it.
+                       // Note: we first get the document id, then lock the cnode to
+                       // avoid any deadlock potential between cp and the root vnode.
+                       //
+                       uint32_t new_id;
+                       if (tmp_doc_id == 0 && (error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
+
+                               if ((error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
+                                       extinfo->document_id = tmp_doc_id = new_id;
+                                       //printf("ASSIGNING: doc-id %d to ino %d\n", extinfo->document_id, cp->c_fileid);
+                                               
+                                       if (mark_it) {
+                                               cp->c_bsdflags |= UF_TRACKED;
+                                       }
+
+                                       // mark the cnode dirty
+                                       cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+                                       int lockflags;
+                                       if ((error = hfs_start_transaction(hfsmp)) == 0) {
+                                               lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+                                               (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+                                               hfs_systemfile_unlock (hfsmp, lockflags);
+                                               (void) hfs_end_transaction(hfsmp);
+                                       }
+
+#if CONFIG_FSE
+                                       add_fsevent(FSE_DOCID_CHANGED, context,
+                                                   FSE_ARG_DEV,   hfsmp->hfs_raw_dev,
+                                                   FSE_ARG_INO,   (ino64_t)0,             // src inode #
+                                                   FSE_ARG_INO,   (ino64_t)cp->c_fileid,  // dst inode #
+                                                   FSE_ARG_INT32, extinfo->document_id,
+                                                   FSE_ARG_DONE);
+
+                                       hfs_unlock (cp);    // so we can send the STAT_CHANGED event without deadlocking
+
+                                       if (need_fsevent(FSE_STAT_CHANGED, vp)) {
+                                               add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
+                                       }
+#else
+                                       hfs_unlock (cp);
+#endif
+                               }
+                       }
+
+                       *document_id = tmp_doc_id;
+               } else {
+                       *document_id = 0;
+               }
+
+               return error;
+       }
+
+       case HFS_TRANSFER_DOCUMENT_ID:
+       {
+               struct cnode *cp = NULL;
+               int error;
+               u_int32_t to_fd = *(u_int32_t *)ap->a_data;
+               struct fileproc *to_fp;
+               struct vnode *to_vp;
+               struct cnode *to_cp;
+
+               cp = VTOC(vp);
+
+               if ((error = fp_getfvp(p, to_fd, &to_fp, &to_vp)) != 0) {
+                       //printf("could not get the vnode for fd %d (err %d)\n", to_fd, error);
+                       return error;
+               }
+               if ( (error = vnode_getwithref(to_vp)) ) {
+                       file_drop(to_fd);
+                       return error;
+               }
+
+               if (VTOHFS(to_vp) != hfsmp) {
+                       error = EXDEV;
+                       goto transfer_cleanup;
+               }
+
+               int need_unlock = 1;
+               to_cp = VTOC(to_vp);
+               error = hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+               if (error != 0) {
+                       //printf("could not lock the pair of cnodes (error %d)\n", error);
+                       goto transfer_cleanup;
+               }
+                       
+               if (!(cp->c_bsdflags & UF_TRACKED)) {
+                       error = EINVAL;
+               } else if (to_cp->c_bsdflags & UF_TRACKED) {
+                       //
+                       // if the destination is already tracked, return an error
+                       // as otherwise it's a silent deletion of the target's
+                       // document-id
+                       //
+                       error = EEXIST;
+               } else if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+                       //
+                       // we can use the FndrExtendedFileInfo because the doc-id is the first
+                       // thing in both it and the ExtendedDirInfo struct, which is fixed in
+                       // format and cannot change layout
+                       //
+                       struct FndrExtendedFileInfo *f_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+                       struct FndrExtendedFileInfo *to_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)to_cp->c_finderinfo + 16);
+
+                       if (f_extinfo->document_id == 0) {
+                               uint32_t new_id;
+
+                               hfs_unlockpair(cp, to_cp);  // have to unlock to be able to get a new-id
+                               
+                               if ((error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
+                                       //
+                                       // re-lock the pair now that we have the document-id
+                                       //
+                                       hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+                                       f_extinfo->document_id = new_id;
+                               } else {
+                                       goto transfer_cleanup;
+                               }
+                       }
+                                       
+                       to_extinfo->document_id = f_extinfo->document_id;
+                       f_extinfo->document_id = 0;
+                       //printf("TRANSFERRING: doc-id %d from ino %d to ino %d\n", to_extinfo->document_id, cp->c_fileid, to_cp->c_fileid);
+
+                       // make sure the destination is also UF_TRACKED
+                       to_cp->c_bsdflags |= UF_TRACKED;
+                       cp->c_bsdflags &= ~UF_TRACKED;
+
+                       // mark the cnodes dirty
+                       cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+                       to_cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+                       int lockflags;
+                       if ((error = hfs_start_transaction(hfsmp)) == 0) {
+
+                               lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+                               (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+                               (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, NULL, NULL);
+
+                               hfs_systemfile_unlock (hfsmp, lockflags);
+                               (void) hfs_end_transaction(hfsmp);
+                       }
+
+#if CONFIG_FSE
+                       add_fsevent(FSE_DOCID_CHANGED, context,
+                                   FSE_ARG_DEV,   hfsmp->hfs_raw_dev,
+                                   FSE_ARG_INO,   (ino64_t)cp->c_fileid,       // src inode #
+                                   FSE_ARG_INO,   (ino64_t)to_cp->c_fileid,    // dst inode #
+                                   FSE_ARG_INT32, to_extinfo->document_id,
+                                   FSE_ARG_DONE);
+
+                       hfs_unlockpair(cp, to_cp);    // unlock this so we can send the fsevents
+                       need_unlock = 0;
+
+                       if (need_fsevent(FSE_STAT_CHANGED, vp)) {
+                               add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
+                       }
+                       if (need_fsevent(FSE_STAT_CHANGED, to_vp)) {
+                               add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, to_vp, FSE_ARG_DONE);
+                       }
+#else
+                       hfs_unlockpair(cp, to_cp);    // unlock this so we can send the fsevents
+                       need_unlock = 0;
+#endif
+               }
+               
+               if (need_unlock) {
+                       hfs_unlockpair(cp, to_cp);
+               }
+
+       transfer_cleanup:
+               vnode_put(to_vp);
+               file_drop(to_fd);
+
+               return error;
+       }
+
        case HFS_PREV_LINK:
        case HFS_NEXT_LINK:
        {
diff --git a/bsd/hfs/hfs_search.c b/bsd/hfs/hfs_search.c
index 53ea092f40a1ad4af3d33aa0bd82c59f45071286..a76a9a9e5fdfe6427315381472c6d1002e21f0a0 100644 (file)
@@ -678,7 +678,6 @@ CheckCriteria(      ExtendedVCB *vcb,
        struct cat_attr c_attr;
        struct cat_fork datafork;
        struct cat_fork rsrcfork;
-       struct hfsmount *hfsmp = (struct hfsmount*)vcb;
        int force_case_sensitivity = proc_is_forcing_hfs_case_sensitivity(vfs_context_proc(ctx));
        
        bzero(&c_attr, sizeof(c_attr));
@@ -750,19 +749,29 @@ CheckCriteria(    ExtendedVCB *vcb,
                if (isHFSPlus) {
                        int case_sensitive = 0;
 
-                       if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
-                               case_sensitive = 1;
-                       } else if (force_case_sensitivity) {
+                       /*
+                        * Longstanding default behavior here is to use a non-case-sensitive 
+                        * search, even on case-sensitive filesystems. 
+                        * 
+                        * We only force case sensitivity if the controlling process has explicitly
+                        * asked for it in the proc flags, and only if they are not doing
+                        * a partial name match.  Consider that if you are doing a partial
+                        * name match ("all files that begin with 'image'"), the likelihood is 
+                        * high that you would want to see all matches, even those that do not
+                        * explicitly match the case.
+                        */
+                       if (force_case_sensitivity) {
                                case_sensitive = 1;
                        }
 
                        /* Check for partial/full HFS Plus name match */
 
                        if ( searchBits & SRCHFS_MATCHPARTIALNAMES ) {
+                               /* always use a case-INSENSITIVE search here */
                                matched = ComparePartialUnicodeName(key->hfsPlus.nodeName.unicode,
                                                                    key->hfsPlus.nodeName.length,
                                                                    (UniChar*)searchInfo1->name,
-                                                                   searchInfo1->nameLength, case_sensitive);
+                                                                   searchInfo1->nameLength, 0);
                        } 
                        else {
                                /* Full name match.  Are we HFSX (case sensitive) or HFS+ ? */
diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c
index bdd4331246f4e6a62c69cf1428ee5f155f4dc56f..d2e76f7b3ca547717554a99bf126618cad6b86ab 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -1001,100 +1001,146 @@ hfs_reload(struct mount *mountp)
        return (0);
 }
 
-
-static uint64_t timeval_to_microseconds(struct timeval *tv)
+__unused
+static uint64_t tv_to_usecs(struct timeval *tv)
 {
        return tv->tv_sec * 1000000ULL + tv->tv_usec;
 }
 
+// Returns TRUE if b - a >= usecs
+static boolean_t hfs_has_elapsed (const struct timeval *a, 
+                                  const struct timeval *b,
+                                  uint64_t usecs)
+{
+    struct timeval diff;
+    timersub(b, a, &diff);
+    return diff.tv_sec * 1000000ULL + diff.tv_usec >= usecs;
+}
+
 static void
 hfs_syncer(void *arg0, void *unused)
 {
 #pragma unused(unused)
     
     struct hfsmount *hfsmp = arg0;
-    clock_sec_t secs;
-    clock_usec_t usecs;
-    uint64_t deadline = 0;
-    uint64_t now;
-    
-    clock_get_system_microtime(&secs, &usecs);
-    now = ((uint64_t)secs * USEC_PER_SEC) + (uint64_t)usecs;
-    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_START, hfsmp, now, timeval_to_microseconds(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp), hfsmp->hfs_mp->mnt_pending_write_size, 0);
-    
+    struct timeval   now;
+
+    microuptime(&now);
+
+    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_START, hfsmp, 
+                          tv_to_usecs(&now),
+                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp), 
+                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
+
+    hfs_syncer_lock(hfsmp);
+
+    if (!hfsmp->hfs_syncer) {
+        // hfs_unmount is waiting for us; leave now and let it do the sync
+        hfsmp->hfs_sync_incomplete = FALSE;
+        hfs_syncer_unlock(hfsmp);
+        hfs_syncer_wakeup(hfsmp);
+        return;
+    }
+
+    /* Check to see whether we should flush now: either the oldest request
+       is more than HFS_MAX_META_DELAY old, or there are no pending writes
+       and HFS_META_DELAY has elapsed since the last write completed. */
+
+    boolean_t flush_now = FALSE;
+
+    if (hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now, HFS_MAX_META_DELAY))
+        flush_now = TRUE;
+    else if (!hfsmp->hfs_mp->mnt_pending_write_size) {
+        /* N.B. accessing mnt_last_write_completed_timestamp is not thread safe, but
+           it won't matter for what we're using it for. */
+        if (hfs_has_elapsed(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp,
+                            &now,
+                            HFS_META_DELAY)) {
+            flush_now = TRUE;
+        }
+    }
+
+    if (!flush_now) {
+        thread_call_t syncer = hfsmp->hfs_syncer;
+
+        hfs_syncer_unlock(hfsmp);
+
+        hfs_syncer_queue(syncer);
+
+        return;
+    }
+
+    timerclear(&hfsmp->hfs_sync_req_oldest);
+
+    hfs_syncer_unlock(hfsmp);
+
+    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_START, 
+                          tv_to_usecs(&now),
+                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
+                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp), 
+                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
+
+    if (hfsmp->hfs_syncer_thread) {
+        printf("hfs: syncer already running!");
+               return;
+       }
+
+    hfsmp->hfs_syncer_thread = current_thread();
+
+    hfs_start_transaction(hfsmp);   // so we hold off any new writes
+
     /*
-     * Flush the journal if there have been no writes (or outstanding writes) for 0.1 seconds.
+     * We intentionally do a synchronous flush (of the journal or entire volume) here.
+     * For journaled volumes, this means we wait until the metadata blocks are written
+     * to both the journal and their final locations (in the B-trees, etc.).
      *
-     * WARNING!  last_write_completed >= last_write_issued isn't sufficient to test whether
-     * there are still outstanding writes.  We could have issued a whole bunch of writes,
-     * and then stopped issuing new writes, then one or more of those writes complete.
+     * This tends to avoid interleaving the metadata writes with other writes (for
+     * example, user data, or to the journal when a later transaction notices that
+     * an earlier transaction has finished its async writes, and then updates the
+     * journal start in the journal header).  Avoiding interleaving of writes is
+     * very good for performance on simple flash devices like SD cards, thumb drives;
+     * and on devices like floppies.  Since removable devices tend to be this kind of
+     * simple device, doing a synchronous flush actually improves performance in
+     * practice.
      *
-     * NOTE: This routine uses clock_get_system_microtime (i.e. uptime) instead of
-     * clock_get_calendar_microtime (i.e. wall time) because mnt_last_write_completed_timestamp
-     * and mnt_last_write_issued_timestamp are also stored as system (uptime) times.
-     * Trying to compute durations from a mix of system and calendar times is meaningless
-     * since they are relative to different points in time. 
+     * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
+     * user data to be written.
      */
-    hfs_start_transaction(hfsmp);   // so we hold off any new writes
-    uint64_t last_write_completed = timeval_to_microseconds(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp);
-    if (hfsmp->hfs_mp->mnt_pending_write_size == 0 && (now - last_write_completed) >= HFS_META_DELAY) {
-       /*
-        * Time to flush the journal.
-        */
-       KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_START, now, last_write_completed, timeval_to_microseconds(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp), hfsmp->hfs_mp->mnt_pending_write_size, 0);
-       
-       /*
-        * We intentionally do a synchronous flush (of the journal or entire volume) here.
-        * For journaled volumes, this means we wait until the metadata blocks are written
-        * to both the journal and their final locations (in the B-trees, etc.).
-        *
-        * This tends to avoid interleaving the metadata writes with other writes (for
-        * example, user data, or to the journal when a later transaction notices that
-        * an earlier transaction has finished its async writes, and then updates the
-        * journal start in the journal header).  Avoiding interleaving of writes is
-        * very good for performance on simple flash devices like SD cards, thumb drives;
-        * and on devices like floppies.  Since removable devices tend to be this kind of
-        * simple device, doing a synchronous flush actually improves performance in
-        * practice.
-        *
-        * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
-        * user data to be written.
-        */
-       if (hfsmp->jnl) {
-           hfs_journal_flush(hfsmp, TRUE);
-       } else {
-           hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
-       }
-       
-       clock_get_system_microtime(&secs, &usecs);
-       now = ((uint64_t)secs * USEC_PER_SEC) + (uint64_t)usecs;
-       
-       KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_END, now, timeval_to_microseconds(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp), timeval_to_microseconds(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp), hfsmp->hfs_mp->mnt_pending_write_size, 0);
-       hfs_end_transaction(hfsmp);
-       
-       //
-       // NOTE: we decrement these *after* we've done the journal_flush() since
-       // it can take a significant amount of time and so we don't want more
-       // callbacks scheduled until we've done this one.
-       //
-       OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
-       OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
-       wakeup((caddr_t)&hfsmp->hfs_sync_incomplete);
+    if (hfsmp->jnl) {
+        hfs_journal_flush(hfsmp, TRUE);
     } else {
-       /*
-        * Defer the journal flush by rescheduling the timer.
-        */
-       
-       clock_interval_to_deadline(HFS_META_DELAY, NSEC_PER_USEC, &deadline);
-       thread_call_enter_delayed(hfsmp->hfs_syncer, deadline);
-       
-       // note: we intentionally return early here and do not
-       // decrement the sync_scheduled and sync_incomplete
-       // variables because we rescheduled the timer.
-       
-       hfs_end_transaction(hfsmp);
+        hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
     }
-    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER| DBG_FUNC_END, deadline ? EAGAIN : 0, deadline, 0, 0, 0);
+
+    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_END, 
+                          (microuptime(&now), tv_to_usecs(&now)),
+                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp), 
+                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp), 
+                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
+
+    hfs_end_transaction(hfsmp);
+
+    hfsmp->hfs_syncer_thread = NULL;
+
+    hfs_syncer_lock(hfsmp);
+
+    // If hfs_unmount lets us and we missed a sync, schedule again
+    if (hfsmp->hfs_syncer && timerisset(&hfsmp->hfs_sync_req_oldest)) {
+        thread_call_t syncer = hfsmp->hfs_syncer;
+
+        hfs_syncer_unlock(hfsmp);
+
+        hfs_syncer_queue(syncer);
+    } else {
+        hfsmp->hfs_sync_incomplete = FALSE;
+        hfs_syncer_unlock(hfsmp);
+        hfs_syncer_wakeup(hfsmp);
+    }
+
+    /* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
+       to continue and therefore hfsmp might be invalid. */
+
+    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
 
 
@@ -1904,7 +1950,6 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
        if (isroot == 0) {
                if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 && 
                                IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) {
-                       hfsmp->hfs_max_pending_io = 4096*1024;   // a reasonable value to start with.
                        hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp);
                        if (hfsmp->hfs_syncer == NULL) {
                                printf("hfs: failed to allocate syncer thread callback for %s (%s)\n",
@@ -1997,36 +2042,34 @@ hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
        if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
                (void) hfs_recording_suspend(hfsmp);
 
-       /*
-        * Cancel any pending timers for this volume.  Then wait for any timers
-        * which have fired, but whose callbacks have not yet completed.
-        */
+    // Tidy up the syncer
        if (hfsmp->hfs_syncer)
        {
-               struct timespec ts = {0, 100000000};    /* 0.1 seconds */
-               
-               /*
-                * Cancel any timers that have been scheduled, but have not
-                * fired yet.  NOTE: The kernel considers a timer complete as
-                * soon as it starts your callback, so the kernel does not
-                * keep track of the number of callbacks in progress.
-                */
-               if (thread_call_cancel(hfsmp->hfs_syncer))
-                       OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
-               thread_call_free(hfsmp->hfs_syncer);
-               hfsmp->hfs_syncer = NULL;
-               
-               /*
-                * This waits for all of the callbacks that were entered before
-                * we did thread_call_cancel above, but have not completed yet.
-                */
-               while(hfsmp->hfs_sync_incomplete > 0)
-               {
-                       msleep((caddr_t)&hfsmp->hfs_sync_incomplete, NULL, PWAIT, "hfs_unmount", &ts);
-               }
-               
-               if (hfsmp->hfs_sync_incomplete < 0)
-                       panic("hfs_unmount: pm_sync_incomplete underflow!\n");
+        hfs_syncer_lock(hfsmp);
+
+        /* First, make sure everything else knows we don't want any more
+           requests queued. */
+        thread_call_t syncer = hfsmp->hfs_syncer;
+        hfsmp->hfs_syncer = NULL;
+
+        hfs_syncer_unlock(hfsmp);
+
+        // Now deal with requests that are outstanding
+        if (hfsmp->hfs_sync_incomplete) {
+            if (thread_call_cancel(syncer)) {
+                // We managed to cancel the timer so we're done
+                hfsmp->hfs_sync_incomplete = FALSE;
+            } else {
+                // Syncer must be running right now so we have to wait
+                hfs_syncer_lock(hfsmp);
+                while (hfsmp->hfs_sync_incomplete)
+                    hfs_syncer_wait(hfsmp);
+                hfs_syncer_unlock(hfsmp);
+            }
+        }
+
+        // Now we're safe to free the syncer
+               thread_call_free(syncer);
        }
 
        if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
@@ -7414,9 +7457,9 @@ hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result)
 static int
 hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
 {
-#define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST))
+#define HFS_ATTR_CMN_VALIDMASK ATTR_CMN_VALIDMASK
 #define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
-#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST | ATTR_CMN_ACCTIME))
+#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_ACCTIME))
 
        ExtendedVCB *vcb = VFSTOVCB(mp);
        struct hfsmount *hfsmp = VFSTOHFS(mp);
diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c
index f67adacc45b0b493df2b0c588afa895e0028d39a..5fe09c2ed69ec1fe6d7ed08fdf316a5cae58152b 100644 (file)
@@ -3008,6 +3008,50 @@ hfs_virtualmetafile(struct cnode *cp)
        return (0);
 }
 
+__private_extern__
+void hfs_syncer_lock(struct hfsmount *hfsmp)
+{
+    hfs_lock_mount(hfsmp);
+}
+
+__private_extern__ 
+void hfs_syncer_unlock(struct hfsmount *hfsmp)
+{
+    hfs_unlock_mount(hfsmp);
+}
+
+__private_extern__
+void hfs_syncer_wait(struct hfsmount *hfsmp)
+{
+    msleep(&hfsmp->hfs_sync_incomplete, &hfsmp->hfs_mutex, PWAIT, 
+           "hfs_syncer_wait", NULL);
+}
+
+__private_extern__
+void hfs_syncer_wakeup(struct hfsmount *hfsmp)
+{
+    wakeup(&hfsmp->hfs_sync_incomplete);
+}
+
+__private_extern__
+uint64_t hfs_usecs_to_deadline(uint64_t usecs)
+{
+    uint64_t deadline;
+    clock_interval_to_deadline(usecs, NSEC_PER_USEC, &deadline);
+    return deadline;
+}
+
+__private_extern__
+void hfs_syncer_queue(thread_call_t syncer)
+{
+    if (thread_call_enter_delayed_with_leeway(syncer,
+                                              NULL,
+                                              hfs_usecs_to_deadline(HFS_META_DELAY),
+                                              0,
+                                              THREAD_CALL_DELAY_SYS_BACKGROUND)) {
+        printf ("hfs: syncer already scheduled!");
+    }
+}
 
 //
 // Fire off a timed callback to sync the disk if the
@@ -3017,44 +3061,30 @@ hfs_virtualmetafile(struct cnode *cp)
 void
 hfs_sync_ejectable(struct hfsmount *hfsmp)
 {
-       if (hfsmp->hfs_syncer)  {
-               clock_sec_t secs;
-               clock_usec_t usecs;
-               uint64_t now;
+    // If we don't have a syncer or we get called by the syncer, just return
+    if (!hfsmp->hfs_syncer || current_thread() == hfsmp->hfs_syncer_thread)
+        return;
 
-               clock_get_calendar_microtime(&secs, &usecs);
-               now = ((uint64_t)secs * 1000000ULL) + (uint64_t)usecs;
+    hfs_syncer_lock(hfsmp);
 
-               if (hfsmp->hfs_sync_incomplete && hfsmp->hfs_mp->mnt_pending_write_size >= hfsmp->hfs_max_pending_io) {
-                       // if we have a sync scheduled but i/o is starting to pile up,
-                       // don't call thread_call_enter_delayed() again because that
-                       // will defer the sync.
-                       return;
-               }
+    if (!timerisset(&hfsmp->hfs_sync_req_oldest))
+        microuptime(&hfsmp->hfs_sync_req_oldest);
 
-               if (hfsmp->hfs_sync_scheduled == 0) {
-                       uint64_t deadline;
+    /* If hfs_unmount is running, it will set hfs_syncer to NULL. Also we
+       don't want to queue again if there is a sync outstanding. */
+    if (!hfsmp->hfs_syncer || hfsmp->hfs_sync_incomplete) {
+        hfs_syncer_unlock(hfsmp);
+        return;
+    }
 
-                       hfsmp->hfs_last_sync_request_time = now;
+    hfsmp->hfs_sync_incomplete = TRUE;
 
-                       clock_interval_to_deadline(HFS_META_DELAY, NSEC_PER_USEC, &deadline);
+    thread_call_t syncer = hfsmp->hfs_syncer;
 
-                       /*
-                        * Increment hfs_sync_scheduled on the assumption that we're the
-                        * first thread to schedule the timer.  If some other thread beat
-                        * us, then we'll decrement it.  If we *were* the first to
-                        * schedule the timer, then we need to keep track that the
-                        * callback is waiting to complete.
-                        */
-                       OSIncrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
-                       if (thread_call_enter_delayed(hfsmp->hfs_syncer, deadline))
-                               OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
-                       else
-                               OSIncrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
-               }               
-       }
-}
+    hfs_syncer_unlock(hfsmp);
 
+    hfs_syncer_queue(syncer);
+}
 
 int
 hfs_start_transaction(struct hfsmount *hfsmp)
@@ -3354,3 +3384,59 @@ check_for_dataless_file(struct vnode *vp, uint64_t op_type)
 
        return error;
 }
+
+
+//
+// NOTE: this function takes care of starting a transaction and
+//       acquiring the systemfile lock so that it can call
+//       cat_update().
+//
+// NOTE: do NOT hold any cnode locks while calling this function
+//       to avoid deadlocks (because we take a lock on the root
+//       cnode)
+//
+int
+hfs_generate_document_id(struct hfsmount *hfsmp, uint32_t *docid)
+{
+       struct vnode *rvp;
+       struct cnode *cp;
+       int error;
+       
+       error = VFS_ROOT(HFSTOVFS(hfsmp), &rvp, vfs_context_kernel());
+       if (error) {
+               return error;
+       }
+
+       cp = VTOC(rvp);
+       if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) != 0) {
+               return error;
+       }
+       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((void *)((char *)&cp->c_attr.ca_finderinfo + 16));
+       
+       int lockflags;
+       if (hfs_start_transaction(hfsmp) != 0) {
+               return error;
+       }
+       lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+                                       
+       if (extinfo->document_id == 0) {
+               // initialize this to start at 3 (one greater than the root-dir id)
+               extinfo->document_id = 3;
+       }
+
+       *docid = extinfo->document_id++;
+
+       // mark the root cnode dirty
+       cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+       (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+       hfs_systemfile_unlock (hfsmp, lockflags);
+       (void) hfs_end_transaction(hfsmp);
+               
+       (void) hfs_unlock(cp);
+
+       vnode_put(rvp);
+       rvp = NULL;
+
+       return 0;
+}
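
The NOTE on hfs_generate_document_id() above implies a specific calling pattern, which both call sites in this commit (the HFS_GET_DOCUMENT_ID handler in hfs_readwrite.c and hfs_vnop_setattr() in hfs_vnops.c) follow: generate the ID with no cnode locks held, then take the target cnode's lock to store it. A condensed sketch of that pattern (hypothetical helper, not in the commit):

    // Hypothetical helper: assign a fresh document ID to 'cp' while following
    // the locking rule above -- hfs_generate_document_id() locks the root
    // cnode itself, so no cnode locks may be held when it is called.
    static int assign_new_document_id(struct hfsmount *hfsmp, struct cnode *cp)
    {
        uint32_t new_id;
        int error;

        // 1. Generate the ID first; this may lock the root cnode.
        if ((error = hfs_generate_document_id(hfsmp, &new_id)) != 0)
            return error;

        // 2. Now lock the target cnode and store the ID in its Finder info.
        if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
            struct FndrExtendedFileInfo *extinfo =
                (struct FndrExtendedFileInfo *)((u_int8_t *)cp->c_finderinfo + 16);

            extinfo->document_id = new_id;
            cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;   // mark the cnode dirty
            hfs_unlock(cp);
        }
        return error;
    }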
diff --git a/bsd/hfs/hfs_vnops.c b/bsd/hfs/hfs_vnops.c
index 1c74ab1f7e0a66cdda3122751b666e0cc8f8968c..414d6de78975f7cc8810d272ff2e5b1ecbb6fb1d 100644 (file)
@@ -50,6 +50,8 @@
 #include <sys/cprotect.h>
 #include <sys/xattr.h>
 #include <string.h>
+#include <sys/fsevents.h>
+#include <kern/kalloc.h>
 
 #include <miscfs/specfs/specdev.h>
 #include <miscfs/fifofs/fifo.h>
@@ -134,7 +136,6 @@ int hfsspec_close(struct vnop_close_args *);
 
 
 
-
 /*****************************************************************************
 *
 * Common Operations on vnodes
@@ -448,6 +449,195 @@ hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skipl
 }
 #endif /* HFS_COMPRESSION */
                
+
+//
+// This function gets the doc_tombstone structure for the
+// current thread.  If the thread doesn't have one, the
+// structure is allocated.
+//
+static struct doc_tombstone *
+get_uthread_doc_tombstone(void)
+{
+       struct  uthread *ut;
+       ut = get_bsdthread_info(current_thread());
+
+       if (ut->t_tombstone == NULL) {
+               ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
+               if (ut->t_tombstone) {
+                       memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
+               }
+       }
+       
+       return ut->t_tombstone;
+}
+
+//
+// This routine clears out the current tombstone for the
+// current thread and if necessary passes the doc-id of
+// the tombstone on to the dst_cnode.
+//
+// If the doc-id transfers to dst_cnode, we also generate
+// a doc-id changed fsevent.  Unlike all the other fsevents,
+// doc-id changed events can only be generated here in HFS
+// where we have the necessary info.
+// 
+static void
+clear_tombstone_docid(struct  doc_tombstone *ut, struct hfsmount *hfsmp, struct cnode *dst_cnode)
+{
+       uint32_t old_id = ut->t_lastop_document_id;
+
+       ut->t_lastop_document_id = 0;
+       ut->t_lastop_parent = NULL;
+       ut->t_lastop_parent_vid = 0;
+       ut->t_lastop_filename[0] = '\0';
+
+       //
+       // If the lastop item is still the same and needs to be cleared,
+       // clear it.
+       //
+       if (dst_cnode && old_id && ut->t_lastop_item && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) {
+               //
+               // clear the document_id from the file that used to have it.
+               // XXXdbg - we need to lock the other vnode and make sure to
+               // update it on disk.
+               //
+               struct cnode *ocp = VTOC(ut->t_lastop_item);
+               struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
+
+               // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
+               ofip->document_id = 0;
+               ocp->c_bsdflags &= ~UF_TRACKED;
+               ocp->c_flag |= C_MODIFIED | C_FORCEUPDATE;   // mark it dirty
+               /* cat_update(hfsmp, &ocp->c_desc, &ocp->c_attr, NULL, NULL); */
+
+       }
+
+#if CONFIG_FSE
+       if (dst_cnode && old_id) {
+               struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&dst_cnode->c_attr.ca_finderinfo + 16);
+
+               add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                           FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+                           FSE_ARG_INO, (ino64_t)ut->t_lastop_fileid,    // src inode #
+                           FSE_ARG_INO, (ino64_t)dst_cnode->c_fileid,    // dst inode #
+                           FSE_ARG_INT32, (uint32_t)fip->document_id,
+                           FSE_ARG_DONE);
+       }
+#endif
+       // last, clear these now that we're all done
+       ut->t_lastop_item     = NULL;
+       ut->t_lastop_fileid   = 0;
+       ut->t_lastop_item_vid = 0;
+}
+
+
+//
+// This function is used to filter out operations on temp
+// filenames.  We have to filter out operations on certain
+// temp filenames to work-around questionable application
+// behavior from apps like Autocad that perform unusual
+// sequences of file system operations for a "safe save".
+static int
+is_ignorable_temp_name(const char *nameptr, int len)
+{
+       if (len == 0) {
+               len = strlen(nameptr);
+       }
+       
+       if (   strncmp(nameptr, "atmp", 4) == 0
+          || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0)
+          || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) {
+               return 1;
+       }
+
+       return 0;
+}
+
+//
+// Decide if we need to save a tombstone or not.  Normally we always
+// save a tombstone - but if there already is one and the name we're
+// given is an ignorable name, then we will not save a tombstone.
+// 
+static int
+should_save_docid_tombstone(struct doc_tombstone *ut, struct vnode *vp, struct componentname *cnp)
+{
+       if (cnp->cn_nameptr == NULL) {
+               return 0;
+       }
+
+       if (ut->t_lastop_document_id && ut->t_lastop_item == vp && is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
+               return 0;
+       }
+
+       return 1;
+}
+
+
+//
+// This function saves a tombstone for the given vnode and name.  The
+// tombstone represents the parent directory and name where the document
+// used to live and the document-id of that file.  This info is recorded
+// in the doc_tombstone structure hanging off the uthread (which assumes
+// that all safe-save operations happen on the same thread).
+//
+// If later on the same parent/name combo comes back into existence then
+// we'll preserve the doc-id from this vnode onto the new vnode.
+//
+static void
+save_tombstone(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int for_unlink)
+{
+       struct cnode *cp = VTOC(vp);
+       struct  doc_tombstone *ut;
+       ut = get_uthread_doc_tombstone();
+                               
+       if (for_unlink && vp->v_type == VREG && cp->c_linkcount > 1) {
+               //
+               // a regular file that is being unlinked and that is also
+               // hardlinked should not clear the UF_TRACKED state or
+               // mess with the tombstone because somewhere else in the
+               // file system the file is still alive.
+               // 
+               return;
+       }
+
+       ut->t_lastop_parent     = dvp;
+       ut->t_lastop_parent_vid = vnode_vid(dvp);
+       ut->t_lastop_fileid     = cp->c_fileid;
+       if (for_unlink) {
+               ut->t_lastop_item      = NULL;
+               ut->t_lastop_item_vid  = 0;
+       } else {
+               ut->t_lastop_item      = vp;
+               ut->t_lastop_item_vid  = vnode_vid(vp);
+       }
+               
+       strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename));
+               
+       struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+       ut->t_lastop_document_id = fip->document_id;
+
+       if (for_unlink) {
+               // clear this so it's never returned again
+               fip->document_id = 0;
+               cp->c_bsdflags &= ~UF_TRACKED;
+
+               if (ut->t_lastop_document_id) {
+                       (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+#if CONFIG_FSE
+                       // this event is more of a "pending-delete" 
+                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                   FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+                                   FSE_ARG_INO, (ino64_t)cp->c_fileid,       // src inode #
+                                   FSE_ARG_INO, (ino64_t)0,                  // dst inode #
+                                   FSE_ARG_INT32, ut->t_lastop_document_id,  // document id
+                                   FSE_ARG_DONE);
+#endif
+               }
+       }
+}
+
+
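
get_uthread_doc_tombstone() is used by save_tombstone() and the callers below
but its body is not visible in this hunk.  A minimal sketch of what it is
assumed to do (lazily allocate the per-uthread doc_tombstone that
uthread_zone_free() kfree()s elsewhere in this commit):

	struct doc_tombstone *
	get_uthread_doc_tombstone(void)
	{
		struct uthread *ut = get_bsdthread_info(current_thread());

		/* allocate the tombstone on first use; it stays attached to
		   the uthread until the thread is torn down */
		if (ut->t_tombstone == NULL) {
			ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
			if (ut->t_tombstone) {
				memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
			}
		}

		return ut->t_tombstone;
	}
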
 /*
  * Open a file/directory.
  */
@@ -1012,7 +1202,29 @@ hfs_vnop_getattr(struct vnop_getattr_args *ap)
        vap->va_data_size = data_size;
        vap->va_supported |= VNODE_ATTR_va_data_size;
 #endif
-    
+
+       if (VATTR_IS_ACTIVE(vap, va_gen)) {
+               if (UBCINFOEXISTS(vp) && (vp->v_ubcinfo->ui_flags & UI_ISMAPPED)) {
+                       /* While the file is mmapped the generation count is invalid.
+                        * However, bump the value so that the write-gen counter
+                        * will be different once the file is unmapped (since the
+                        * pageouts may not have happened yet by the time it is
+                        * unmapped).
+                        */
+                       if (vp->v_ubcinfo->ui_flags & UI_MAPPEDWRITE) {
+                               hfs_incr_gencount (cp);
+                       }
+                       vap->va_gen = 0;
+               } else {
+                       vap->va_gen = hfs_get_gencount(cp);
+               }
+                       
+               VATTR_SET_SUPPORTED(vap, va_gen);
+       }
+       if (VATTR_IS_ACTIVE(vap, va_document_id)) {
+               vap->va_document_id = hfs_get_document_id(cp);
+               VATTR_SET_SUPPORTED(vap, va_document_id);
+       }
+
        /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
        vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
                             VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
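
hfs_get_document_id(), used above for va_document_id, reads the id out of the
extended area of the 32-byte Finder Info, which this change overlays at offset
16 with FndrExtendedFileInfo / FndrExtendedDirInfo.  For reference, the layout
assumed by all of the "+ 16" overlays in these hunks is roughly the following
(a sketch only; the field order is an assumption based on hfs_format.h, not
quoted from this page):

	struct FndrExtendedFileInfo {
		u_int32_t document_id;         /* new in this change */
		u_int32_t date_added;
		u_int16_t extended_flags;
		u_int16_t reserved2;
		u_int32_t write_gen_counter;   /* bumped when the object changes */
	} __attribute__((aligned(2), packed));
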
@@ -1159,6 +1371,26 @@ hfs_vnop_setattr(ap)
                return (EPERM);
        }
 
+       //
+       // Check if we'll need a document_id and if so, get it before we lock the
+       // cnode to avoid any possible deadlock with the root vnode which has
+       // to get locked to get the document id
+       //
+       u_int32_t document_id=0;
+       if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
+               struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
+               //
+               // If the document_id is not set, get a new one.  It will be set
+               // on the file down below once we hold the cnode lock.
+               //
+               if (fip->document_id == 0) {
+                       if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
+                               document_id = 0;
+                       }
+               }
+       }
+
+
        /*
         * File size change request.
         * We are guaranteed that this is not a directory, and that
@@ -1283,9 +1515,53 @@ hfs_vnop_setattr(ap)
                        decmpfs_reset_state = 1;
                }
 #endif
+               if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
+                       struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+
+                       //
+                       // we're marking this item UF_TRACKED.  if the document_id is
+                       // not set, get a new one and put it on the file.
+                       //
+                       if (fip->document_id == 0) {
+                               if (document_id != 0) {
+                                       // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
+                                       fip->document_id = (uint32_t)document_id;
+#if CONFIG_FSE
+                                       add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
+                                                   FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+                                                   FSE_ARG_INO, (ino64_t)0,                // src inode #
+                                                   FSE_ARG_INO, (ino64_t)cp->c_fileid,     // dst inode #
+                                                   FSE_ARG_INT32, document_id,
+                                                   FSE_ARG_DONE);
+#endif
+                               } else {
+                                       // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
+                               }
+                       }
+
+               } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
+                       //
+                       // UF_TRACKED is being cleared so clear the document_id
+                       //
+                       struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+                       if (fip->document_id) {
+                               // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
+#if CONFIG_FSE
+                               add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
+                                           FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+                                           FSE_ARG_INO, (ino64_t)cp->c_fileid,          // src inode #
+                                           FSE_ARG_INO, (ino64_t)0,                     // dst inode #
+                                           FSE_ARG_INT32, fip->document_id,             // document id
+                                           FSE_ARG_DONE);
+#endif
+                               fip->document_id = 0;
+                               cp->c_bsdflags &= ~UF_TRACKED;
+                       }
+               }
 
                cp->c_bsdflags = vap->va_flags;
                cp->c_touch_chgtime = TRUE;
+
                
                /*
                 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
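
The UF_TRACKED handling above is driven by ordinary userspace flag changes.  A
minimal sketch of how a process opts a file into document tracking, and thereby
triggers the document_id assignment in hfs_vnop_setattr() (the path here is
hypothetical):

	#include <sys/stat.h>
	#include <unistd.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* Setting UF_TRACKED makes hfs_vnop_setattr() assign a
		 * document_id (and emit FSE_DOCID_CHANGED) when the flags
		 * reach the filesystem.  A real caller would stat() first and
		 * OR UF_TRACKED into the existing st_flags. */
		if (chflags("/tmp/example-document.txt", UF_TRACKED) != 0) {
			perror("chflags");
			return 1;
		}
		return 0;
	}
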
@@ -2691,6 +2967,32 @@ hfs_vnop_rmdir(ap)
                hfs_unlockpair (dcp, cp);
                return ENOENT;
        }
+
+       //
+       // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+       //
+       if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+               uint32_t newid;
+
+               hfs_unlockpair(dcp, cp);
+
+               if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
+                       hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+                       ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                   FSE_ARG_DEV,   VTOHFS(vp)->hfs_raw_dev,
+                                   FSE_ARG_INO,   (ino64_t)0,             // src inode #
+                                   FSE_ARG_INO,   (ino64_t)cp->c_fileid,  // dst inode #
+                                   FSE_ARG_INT32, newid,
+                                   FSE_ARG_DONE);
+#endif
+               } else {
+                       // XXXdbg - couldn't get a new docid... what to do?  can't really fail the rm...
+                       hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+               }
+       }
+
        error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
 
        hfs_unlockpair(dcp, cp);
@@ -2858,12 +3160,34 @@ hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
        }
 
        error = cat_delete(hfsmp, &desc, &cp->c_attr);
-       if (error == 0) {
+
+       if (!error) {
+               //
+               // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
+               // we don't need to touch the document_id as it's handled by the rename code.
+               // otherwise it's a normal remove and we need to save the document id in the
+               // per thread struct and clear it from the cnode.
+               //
+               struct  doc_tombstone *ut;
+               ut = get_uthread_doc_tombstone();
+               if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
+               
+                       if (ut->t_lastop_document_id) {
+                               clear_tombstone_docid(ut, hfsmp, NULL);
+                       }
+                       save_tombstone(hfsmp, dvp, vp, cnp, 1);
+
+               }
+
                /* The parent lost a child */
                if (dcp->c_entries > 0)
                        dcp->c_entries--;
                DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
                dcp->c_dirchangecnt++;
+               {
+                       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+                       extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+               }
                dcp->c_touch_chgtime = TRUE;
                dcp->c_touch_modtime = TRUE;
                hfs_touchtimes(hfsmp, cp);
@@ -2947,6 +3271,30 @@ relock:
                }       
                return (error);
        }
+       //
+       // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+       //
+       if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+               uint32_t newid;
+
+               hfs_unlockpair(dcp, cp);
+
+               if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
+                       hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+                       ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                   FSE_ARG_DEV,   VTOHFS(vp)->hfs_raw_dev,
+                                   FSE_ARG_INO,   (ino64_t)0,             // src inode #
+                                   FSE_ARG_INO,   (ino64_t)cp->c_fileid,  // dst inode #
+                                   FSE_ARG_INT32, newid,
+                                   FSE_ARG_DONE);
+#endif
+               } else {
+                       // XXXdbg - couldn't get a new docid... what to do?  can't really fail the rm...
+                       hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+               }
+       }
        
        /*
         * Lazily respond to determining if there is a valid resource fork
@@ -3415,6 +3763,10 @@ hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
                                DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
                        }
                        dcp->c_dirchangecnt++;
+                       {
+                               struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+                               extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+                       }
                        dcp->c_ctime = tv.tv_sec;
                        dcp->c_mtime = tv.tv_sec;
                        (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
@@ -3496,6 +3848,10 @@ hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
                        if (dcp->c_entries > 0)
                                dcp->c_entries--;
                        dcp->c_dirchangecnt++;
+                       {
+                               struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+                               extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+                       }
                        dcp->c_ctime = tv.tv_sec;
                        dcp->c_mtime = tv.tv_sec;
                        (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
@@ -3589,6 +3945,24 @@ hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
                
        }
 
+       //
+       // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
+       // we don't need to touch the document_id as it's handled by the rename code.
+       // otherwise it's a normal remove and we need to save the document id in the
+       // per thread struct and clear it from the cnode.
+       //
+       struct  doc_tombstone *ut;
+       ut = get_uthread_doc_tombstone();
+       if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
+
+               if (ut->t_lastop_document_id) {
+                       clear_tombstone_docid(ut, hfsmp, NULL);
+               }
+               save_tombstone(hfsmp, dvp, vp, cnp, 1);
+
+       }
+
+
        /*
         * All done with this cnode's descriptor...
         *
@@ -3717,6 +4091,7 @@ hfs_vnop_rename(ap)
        int emit_rename = 1;
        int emit_delete = 1;
        int is_tracked = 0;
+       int unlocked;
 
        orig_from_ctime = VTOC(fvp)->c_ctime;
        if (tvp && VTOC(tvp)) {
@@ -3790,6 +4165,7 @@ retry:
                took_trunc_lock = 1;
        }
 
+relock:
        error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
                             HFS_EXCLUSIVE_LOCK, &error_cnode);
        if (error) {
@@ -3835,6 +4211,75 @@ retry:
        tdcp = VTOC(tdvp);
        tcp = tvp ? VTOC(tvp) : NULL;
 
+       //
+       // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+       //
+       unlocked = 0;
+       if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+               uint32_t newid;
+
+               hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
+               unlocked = 1;
+
+               if (hfs_generate_document_id(hfsmp, &newid) == 0) {
+                       hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+                       ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                   FSE_ARG_DEV,   hfsmp->hfs_raw_dev,
+                                   FSE_ARG_INO,   (ino64_t)0,             // src inode #
+                                   FSE_ARG_INO,   (ino64_t)fcp->c_fileid,  // dst inode #
+                                   FSE_ARG_INT32, newid,
+                                   FSE_ARG_DONE);
+#endif
+                       hfs_unlock(fcp);
+               } else {
+                       // XXXdbg - couldn't get a new docid... what to do?  can't really fail the rename...
+               }
+
+               //
+               // check if we're going to need to fix tcp as well.  if we aren't, go back and relock
+               // everything.  otherwise continue on and fix up tcp as well before relocking.
+               //
+               if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
+                       goto relock;
+               }
+       }
+
+       //
+       // same thing for tcp if it's set
+       //
+       if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+               uint32_t newid;
+
+               if (!unlocked) {
+                       hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
+                       unlocked = 1;
+               }
+
+               if (hfs_generate_document_id(hfsmp, &newid) == 0) {
+                       hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+                       ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                   FSE_ARG_DEV,   hfsmp->hfs_raw_dev,
+                                   FSE_ARG_INO,   (ino64_t)0,             // src inode #
+                                   FSE_ARG_INO,   (ino64_t)tcp->c_fileid,  // dst inode #
+                                   FSE_ARG_INT32, newid,
+                                   FSE_ARG_DONE);
+#endif
+                       hfs_unlock(tcp);
+               } else {
+                       // XXXdbg - couldn't get a new docid... what to do?  can't really fail the rename...
+               }
+
+               // go back up and relock everything.  next time through the if statement won't be true
+               // and we'll skip over this block of code.
+               goto relock;
+       }
+
+
+
        /* 
         * Acquire iocounts on the destination's resource fork vnode 
         * if necessary. If dst/src are files and the dst has a resource 
@@ -4133,6 +4578,57 @@ retry:
         * capable of clearing out unused blocks for an open-unlinked file or dir.
         */
        if (tvp) {
+               //
+               // if the destination has a document id, we need to preserve it
+               //
+               if (fvp != tvp) {
+                       uint32_t document_id;
+                       struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
+                       struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
+                       
+                       if (ffip->document_id && tfip->document_id) {
+                               // both documents are tracked.  only save a tombstone from tcp and do nothing else.
+                               save_tombstone(hfsmp, tdvp, tvp, tcnp, 0);
+                       } else {
+                               struct  doc_tombstone *ut;
+                               ut = get_uthread_doc_tombstone();
+                               
+                               document_id = tfip->document_id;
+                               tfip->document_id = 0;
+                       
+                               if (document_id != 0) {
+                                       // clear UF_TRACKED as well since tcp is now no longer tracked
+                                       tcp->c_bsdflags &= ~UF_TRACKED;
+                                       (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
+                               }
+
+                               if (ffip->document_id == 0 && document_id != 0) {
+                                       // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
+                                       fcp->c_bsdflags |= UF_TRACKED;
+                                       ffip->document_id = document_id;
+                                       
+                                       (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
+#if CONFIG_FSE
+                                       add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+                                                   FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+                                                   FSE_ARG_INO, (ino64_t)tcp->c_fileid,           // src inode #
+                                                   FSE_ARG_INO, (ino64_t)fcp->c_fileid,           // dst inode #
+                                                   FSE_ARG_INT32, (uint32_t)ffip->document_id,
+                                                   FSE_ARG_DONE);
+#endif
+                               } else if ((fcp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, fvp, fcnp)) {
+
+                                       if (ut->t_lastop_document_id) {
+                                               clear_tombstone_docid(ut, hfsmp, NULL);
+                                       }
+                                       save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
+
+                                       //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
+                                       //       ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
+                               }
+                       }
+               }
+
                /*
                 * When fvp matches tvp they could be case variants
                 * or matching hard links.
@@ -4235,6 +4731,47 @@ retry:
                 * as quickly as possible.
                 */
                vnode_recycle(tvp);
+       } else {
+               struct  doc_tombstone *ut;
+               ut = get_uthread_doc_tombstone();
+               
+               //
+               // There is nothing at the destination.  If the file being renamed is
+               // tracked, save a "tombstone" of the document_id.  If the file is
+               // not a tracked file, then see if it needs to inherit a tombstone.
+               //
+               // NOTE: we do not save a tombstone if the file being renamed begins
+               //       with "atmp" which is done to work around AutoCAD's bizarre
+               //       5-step un-safe save behavior
+               //
+               if (fcp->c_bsdflags & UF_TRACKED) {
+                       if (should_save_docid_tombstone(ut, fvp, fcnp)) {
+                               save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
+                               
+                               //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
+                               //       ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
+                       } else {
+                               // intentionally do nothing
+                       }
+               } else if (   ut->t_lastop_document_id != 0
+                          && tdvp == ut->t_lastop_parent
+                          && vnode_vid(tdvp) == ut->t_lastop_parent_vid
+                          && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
+
+                       //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
+                       struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
+                       fcp->c_bsdflags |= UF_TRACKED;
+                       fip->document_id = ut->t_lastop_document_id;
+                       cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
+                       
+                       clear_tombstone_docid(ut, hfsmp, fcp);    // will send the docid-changed fsevent
+
+               } else if (ut->t_lastop_document_id && should_save_docid_tombstone(ut, fvp, fcnp) && should_save_docid_tombstone(ut, tvp, tcnp)) {
+                       // no match, clear the tombstone
+                       //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
+                       clear_tombstone_docid(ut, hfsmp, NULL);
+               }
+                          
        }
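
Taken together, the branches above are what let a document id survive the kind
of safe-save sequence the comments refer to.  An illustrative sequence (the
names are made up, and real applications vary in the exact steps):

	/*
	 * 1. app writes the new contents to "Doc.txt.sb-1a2b"      (temp file)
	 * 2. rename("Doc.txt", "Doc.txt.bak")
	 *       the tracked original moves away; a tombstone recording
	 *       (parent dir, "Doc.txt", doc-id) is saved on this thread
	 * 3. rename("Doc.txt.sb-1a2b", "Doc.txt")
	 *       nothing is at the destination and the parent/name match the
	 *       tombstone, so the new file inherits the doc-id, gets
	 *       UF_TRACKED, and clear_tombstone_docid() emits the
	 *       FSE_DOCID_CHANGED event
	 */
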
 skip_rm:
        /*
@@ -4306,6 +4843,10 @@ skip_rm:
                }
                tdcp->c_entries++;
                tdcp->c_dirchangecnt++;
+               {
+                       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)tdcp->c_finderinfo + 16);
+                       extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+               }
                if (fdcp->c_entries > 0)
                        fdcp->c_entries--;
                fdcp->c_dirchangecnt++;
@@ -4315,6 +4856,11 @@ skip_rm:
                fdcp->c_flag |= C_FORCEUPDATE;  // XXXdbg - force it out!
                (void) hfs_update(fdvp, 0);
        }
+       {       
+               struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)fdcp->c_finderinfo + 16);
+               extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+       }
+               
        tdcp->c_childhint = out_desc.cd_hint;   /* Cache directory's location */
        tdcp->c_touch_chgtime = TRUE;
        tdcp->c_touch_modtime = TRUE;
@@ -4586,7 +5132,7 @@ typedef union {
  *
  *  In fact, the offset used by HFS is essentially an index (26 bits)
  *  with a tag (6 bits).  The tag is for associating the next request
- *  with the current request.  This enables us to have multiple threads
+ *  with the current request.  This enables us to have multiple threads
  *  reading the directory while the directory is also being modified.
  *
  *  Each tag/index pair is tied to a unique directory hint.  The hint
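
One possible encoding consistent with the comment above, assuming the index
occupies the low 26 bits and the tag the high 6 bits (the helper names and
masks here are illustrative; the kernel's own union may place the bits
differently):

	#define DIR_INDEX_BITS  26
	#define DIR_INDEX_MASK  ((1u << DIR_INDEX_BITS) - 1)    /* 0x03ffffff */

	static inline u_int32_t dir_offset_index(u_int32_t offset) {
		return offset & DIR_INDEX_MASK;
	}
	static inline u_int32_t dir_offset_tag(u_int32_t offset) {
		return offset >> DIR_INDEX_BITS;
	}
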
@@ -5443,10 +5989,18 @@ hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
                /* Update the parent directory */
                dcp->c_childhint = out_desc.cd_hint;    /* Cache directory's location */
                dcp->c_entries++;
+               {
+                       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+                       extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+               }
                if (vnodetype == VDIR) {
                        INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
                }
                dcp->c_dirchangecnt++;
+               {       
+                       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+                       extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+               }
                dcp->c_ctime = tv.tv_sec;
                dcp->c_mtime = tv.tv_sec;
                (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
@@ -5559,6 +6113,47 @@ hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
                goto exit;
 
        cp = VTOC(tvp);
+
+       struct  doc_tombstone *ut;
+       ut = get_uthread_doc_tombstone();
+       if (   ut->t_lastop_document_id != 0 
+           && ut->t_lastop_parent == dvp
+           && ut->t_lastop_parent_vid == vnode_vid(dvp)
+           && strcmp((char *)ut->t_lastop_filename, (char *)cp->c_desc.cd_nameptr) == 0) {
+               struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+
+               //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
+               fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
+
+               cp->c_bsdflags |= UF_TRACKED;
+               // mark the cnode dirty
+               cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+               if ((error = hfs_start_transaction(hfsmp)) == 0) {
+                       lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+                       (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+                       hfs_systemfile_unlock (hfsmp, lockflags);
+                       (void) hfs_end_transaction(hfsmp);
+               }
+
+               clear_tombstone_docid(ut, hfsmp, cp);       // will send the docid-changed fsevent
+       } else if (ut->t_lastop_document_id != 0) {
+               int len = cnp->cn_namelen;
+               if (len == 0) {
+                       len = strlen(cnp->cn_nameptr);
+               }
+
+               if (is_ignorable_temp_name(cnp->cn_nameptr, len)) {
+                       // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
+               } else {
+                       // Clear the tombstone because the thread is not recreating the same path
+                       // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
+                       clear_tombstone_docid(ut, hfsmp, NULL);
+               }
+       }
+
        *vpp = tvp;
 
 #if CONFIG_PROTECT
@@ -6078,6 +6673,46 @@ hfsfifo_close(ap)
 
 #endif /* FIFO */
 
+/*
+ * Getter for the document_id.
+ * The document_id is stored in the FndrExtendedFileInfo/FndrExtendedDirInfo
+ * area of the Finder Info (16 bytes in).
+ */
+static u_int32_t 
+hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
+{
+       u_int8_t *finfo = NULL;
+       u_int32_t doc_id = 0;
+       
+       /* overlay the FinderInfo to the correct pointer, and advance */
+       finfo = ((uint8_t *)finderinfo) + 16;
+
+       if (S_ISDIR(mode) || S_ISREG(mode)) {
+               /* FndrExtendedFileInfo and FndrExtendedDirInfo keep document_id
+                * at the same offset, so this overlay covers both files and
+                * directories (the S_ISDIR branch below is never reached). */
+               struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
+               doc_id = extinfo->document_id;
+       } else if (S_ISDIR(mode)) {
+               struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)finderinfo + 16);
+               doc_id = extinfo->document_id;
+       }       
+
+       return doc_id;
+}
+
+
+/* getter(s) for document id */
+u_int32_t
+hfs_get_document_id(struct cnode *cp)
+{
+       return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
+           cp->c_attr.ca_mode));
+}
+
+/* If you have finderinfo and mode, you can use this */
+u_int32_t
+hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
+{
+       return (hfs_get_document_id_internal(finderinfo, mode));
+}
+
 /*
  * Synchronize a file's in-core state with that on disk.
  */
index c29b4a0c8f3d9edb857071e07c576d702172e099..29145dd4a5ecafe68444da1483b143ceac54d004 100644 (file)
@@ -241,11 +241,14 @@ static int hfs_zero_hidden_fields (struct cnode *cp, u_int8_t *finderinfo)
        
        if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
                struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
+               extinfo->document_id = 0;
                extinfo->date_added = 0;
                extinfo->write_gen_counter = 0;
        } else if (S_ISDIR(cp->c_attr.ca_mode)) {
                struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
+               extinfo->document_id = 0;
                extinfo->date_added = 0;
+               extinfo->write_gen_counter = 0;
        } else {
                /* Return an error */
                return -1;
@@ -724,6 +727,7 @@ hfs_vnop_setxattr(struct vnop_setxattr_args *ap)
                u_int16_t fdFlags;
                u_int32_t dateadded = 0;
                u_int32_t write_gen_counter = 0;
+               u_int32_t document_id = 0;
 
                attrsize = sizeof(VTOC(vp)->c_finderinfo);
 
@@ -761,7 +765,13 @@ hfs_vnop_setxattr(struct vnop_setxattr_args *ap)
                /* Grab the current date added from the cnode */
                dateadded = hfs_get_dateadded (cp);
                if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
-                       write_gen_counter = hfs_get_gencount(cp);
+                       struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+                       write_gen_counter = extinfo->write_gen_counter;
+                       document_id = extinfo->document_id;
+               } else if (S_ISDIR(cp->c_attr.ca_mode)) {
+                       struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+                       write_gen_counter = extinfo->write_gen_counter;
+                       document_id = extinfo->document_id;
                }
 
                /* Zero out the date added field to ignore user's attempts to set it */
@@ -796,9 +806,12 @@ hfs_vnop_setxattr(struct vnop_setxattr_args *ap)
                        struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
                        extinfo->date_added = OSSwapHostToBigInt32(dateadded);
                        extinfo->write_gen_counter = write_gen_counter;
+                       extinfo->document_id = document_id;
                } else if (S_ISDIR(cp->c_attr.ca_mode)) {
                        struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
                        extinfo->date_added = OSSwapHostToBigInt32(dateadded);
+                       extinfo->write_gen_counter = write_gen_counter;
+                       extinfo->document_id = document_id;
                }
 
                /* Set the cnode's Finder Info. */
@@ -1372,7 +1385,7 @@ hfs_vnop_removexattr(struct vnop_removexattr_args *ap)
                void * finderinfo_start;
                int finderinfo_size;
                u_int8_t finderinfo[32];
-               u_int32_t date_added, write_gen_counter;
+               u_int32_t date_added, write_gen_counter, document_id;
                u_int8_t *finfo = NULL;
         
                if ((result = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
@@ -1411,9 +1424,12 @@ hfs_vnop_removexattr(struct vnop_removexattr_args *ap)
                        struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
                        date_added = extinfo->date_added;
                        write_gen_counter = extinfo->write_gen_counter;
+                       document_id = extinfo->document_id;
                } else if (S_ISDIR(cp->c_attr.ca_mode)) {
                        struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
                        date_added = extinfo->date_added;
+                       write_gen_counter = extinfo->write_gen_counter;
+                       document_id = extinfo->document_id;
                }
                
                if (vnode_islnk(vp)) {
@@ -1432,9 +1448,12 @@ hfs_vnop_removexattr(struct vnop_removexattr_args *ap)
                        struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
                        extinfo->date_added = date_added;
                        extinfo->write_gen_counter = write_gen_counter;
+                       extinfo->document_id = document_id;
                } else if (S_ISDIR(cp->c_attr.ca_mode)) {
                        struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
                        extinfo->date_added = date_added;
+                       extinfo->write_gen_counter = write_gen_counter;
+                       extinfo->document_id = document_id;
                }
         
                /* Updating finderInfo updates change time and modified time */
index c6dd52824f21e35d8e7ededd5c1f36e97fc74499..da7a5395ccc7a6ae074232042d5f09139c3da9ef 100644 (file)
@@ -1610,6 +1610,13 @@ uthread_cred_free(void *uthread)
 void
 uthread_zone_free(void *uthread)
 {
+       uthread_t uth = (uthread_t)uthread;
+
+       if (uth->t_tombstone) {
+               kfree(uth->t_tombstone, sizeof(struct doc_tombstone));
+               uth->t_tombstone = NULL;
+       }
+
        /* and free the uthread itself */
        zfree(uthread_zone, uthread);
 }
index 25a484798d090dd5340869ed0a79606f3f30f5f9..caed4c433c277da2bf3a9e2efe4b484e02215af2 100644 (file)
@@ -585,10 +585,20 @@ shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
                }
 
                if (IS_64BIT_PROCESS(p)) {
-                       error = copyout((caddr_t)&shmseg->u, uap->buf, sizeof(struct user_shmid_ds));
+                       struct user_shmid_ds shmid_ds;
+                       memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));
+                       
+                       /* Clear kernel reserved pointer before copying to user space */
+                       shmid_ds.shm_internal = USER_ADDR_NULL;
+                       
+                       error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
                } else {
                        struct user32_shmid_ds shmid_ds32;
                        shmid_ds_64to32(&shmseg->u, &shmid_ds32);
+                       
+                       /* Clear kernel reserved pointer before copying to user space */
+                       shmid_ds32.shm_internal = (user32_addr_t)0;
+                       
                        error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
                }
                if (error) {
@@ -1071,6 +1081,7 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
                struct user_IPCS_command u64;
        } ipcs;
        struct user32_shmid_ds shmid_ds32;      /* post conversion, 32 bit version */
+       struct user_shmid_ds   shmid_ds;        /* 64 bit version */
        void *shmid_dsp;
        size_t ipcs_sz = sizeof(struct user_IPCS_command);
        size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
@@ -1142,7 +1153,18 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
                 */
                if (!IS_64BIT_PROCESS(p)) {
                        shmid_ds_64to32(shmid_dsp, &shmid_ds32);
+                       
+                       /* Clear kernel reserved pointer before copying to user space */
+                       shmid_ds32.shm_internal = (user32_addr_t)0;
+                       
                        shmid_dsp = &shmid_ds32;
+               } else {
+                       memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));
+
+                       /* Clear kernel reserved pointer before copying to user space */
+                       shmid_ds.shm_internal = USER_ADDR_NULL;
+                       
+                       shmid_dsp = &shmid_ds;
                }
                error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
                if (!error) {
index f9ae6591bdea4cce463730d31b26a289725ac1cc..2916f3e08b3546f6fc770034999b9d780cbea371 100644 (file)
@@ -1744,6 +1744,9 @@ ubc_map(vnode_t vp, int flags)
                        if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
                                need_ref = 1;
                        SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
+                       if (flags & PROT_WRITE) {
+                               SET(uip->ui_flags, UI_MAPPEDWRITE);
+                       }
                }
                CLR(uip->ui_flags, UI_MAPBUSY);
 
index 4eacfc744e64d56c7297c3755582207845e99694..cd5ecec187cdce67fb8a71608377aeb3f7ce04ed 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -357,7 +357,7 @@ typedef struct vol_attributes_attr {
  */
 #define ATTR_CMN_RETURNED_ATTRS                        0x80000000
 
-#define ATTR_CMN_VALIDMASK                     0x9FE7FFFF
+#define ATTR_CMN_VALIDMASK                     0xBFFFFFFF
 #define ATTR_CMN_SETMASK                       0x01C7FF00
 #define ATTR_CMN_VOLSETMASK                    0x00006700
 
index 82c16ac4811a90c10bb09fc9ddced89b3d696e10..f1208ffc3f17a7a9fb354e6633bfbbeacaabd02b 100644 (file)
 #define FSE_CHOWN                8
 #define FSE_XATTR_MODIFIED       9
 #define FSE_XATTR_REMOVED       10
+#define FSE_DOCID_CREATED       11
+#define FSE_DOCID_CHANGED       12
 
-#define FSE_MAX_EVENTS          11
+#define FSE_MAX_EVENTS          13
 #define FSE_ALL_EVENTS         998
 
 #define FSE_EVENTS_DROPPED     999
index dbc19de88b4f3fe0d9348e3b606e178b68ea12bc..7f0644724082b79635e32faaed5dd1324ed46c04 100644 (file)
@@ -148,6 +148,7 @@ struct ubc_info {
 #define        UI_ISMAPPED     0x00000010      /* vnode is currently mapped */
 #define UI_MAPBUSY     0x00000020      /* vnode is being mapped or unmapped */
 #define UI_MAPWAITING  0x00000040      /* someone waiting for UI_MAPBUSY */
+#define UI_MAPPEDWRITE 0x00000080      /* it's mapped with PROT_WRITE */
 
 /*
  * exported primitives for loadable file systems.
index 771563663a6839003193b7d35e4e7485c45e6c34..27ff9bdab6c04a8d1951c6df985db23098e85fd2 100644 (file)
@@ -96,6 +96,26 @@ struct vfs_context {
        kauth_cred_t    vc_ucred;               /* per thread credential */
 };
 
+/*
+ * struct representing a document "tombstone" that's recorded
+ * when a thread manipulates files marked with a document-id.
+ * if the thread recreates the same item, this tombstone is
+ * used to preserve the document_id on the new file.
+ *
+ * It is a separate structure because of its size - we want to
+ * allocate it on demand instead of just stuffing it into the
+ * uthread structure.
+ */
+struct doc_tombstone {
+       struct vnode     *t_lastop_parent;
+       struct vnode     *t_lastop_item;
+       uint32_t          t_lastop_parent_vid;
+       uint32_t          t_lastop_item_vid;
+       uint64_t          t_lastop_fileid;
+       uint64_t          t_lastop_document_id;
+       unsigned char     t_lastop_filename[NAME_MAX+1];
+};
+
 #endif /* !__LP64 || XNU_KERNEL_PRIVATE */
 
 #ifdef BSD_KERNEL_PRIVATE
@@ -249,6 +269,9 @@ struct uthread {
        void *          uu_threadlist;
        char *          pth_name;
        struct label *  uu_label;       /* MAC label */
+
+       /* Document Tracking struct used to track a "tombstone" for a document */
+       struct doc_tombstone *t_tombstone;
 };
 
 typedef struct uthread * uthread_t;
index 8b97538acfab924ea781d2465bfb76b3d1006a91..ea55aa920efd67735a4433423f2e88e297e6d86e 100644 (file)
@@ -521,9 +521,10 @@ struct vnode_trigger_param {
 #define VNODE_ATTR_va_guuid            (1LL<<27)       /* 08000000 */
 #define VNODE_ATTR_va_nchildren                (1LL<<28)       /* 10000000 */
 #define VNODE_ATTR_va_dirlinkcount     (1LL<<29)       /* 20000000 */
-#define VNODE_ATTR_va_addedtime                (1LL<<30)               /* 40000000 */
-#define VNODE_ATTR_va_dataprotect_class                (1LL<<31)               /* 80000000 */
-#define VNODE_ATTR_va_dataprotect_flags                (1LL<<32)               /* 100000000 */
+#define VNODE_ATTR_va_addedtime                (1LL<<30)       /* 40000000 */
+#define VNODE_ATTR_va_dataprotect_class        (1LL<<31)       /* 80000000 */
+#define VNODE_ATTR_va_dataprotect_flags        (1LL<<32)       /* 100000000 */
+#define VNODE_ATTR_va_document_id      (1LL<<33)       /* 200000000 */
 
 #define VNODE_ATTR_BIT(n)      (VNODE_ATTR_ ## n)
 /*
@@ -563,7 +564,8 @@ struct vnode_trigger_param {
                                VNODE_ATTR_BIT(va_uuuid) |              \
                                VNODE_ATTR_BIT(va_guuid) |              \
                                VNODE_ATTR_BIT(va_dataprotect_class) |  \
-                               VNODE_ATTR_BIT(va_dataprotect_flags))
+                               VNODE_ATTR_BIT(va_dataprotect_flags) |  \
+                               VNODE_ATTR_BIT(va_document_id))
 
 
 struct vnode_attr {
@@ -631,6 +633,7 @@ struct vnode_attr {
        /* Data Protection fields */
        uint32_t va_dataprotect_class;  /* class specified for this file if it didn't exist */
        uint32_t va_dataprotect_flags;  /* flags from NP open(2) to the filesystem */
+       uint32_t va_document_id;
 };
 
 #ifdef BSD_KERNEL_PRIVATE
index 25c57ee40336dc17da56f7acbe0ecccc82386d05..ca03027b26b2e437b6ccb3c4f23cc5b2ef88baf1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 1995-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 
 #define ATTR_TIME_SIZE -1
 
+/*
+ * SPI.
+ */
+#define FSOPT_ATTRLIST_EXTENDED        0x00000020
+
+/* Valid only if FSOPT_ATTRLIST_EXTENDED is set */
+#define ATTR_CMN_GEN_COUNT     0x00080000 /* same as ATTR_CMN_NAMEDATTRCOUNT */
+#define ATTR_CMN_DOCUMENT_ID   0x00100000 /* same as ATTR_CMN_NAMEDATTRLIST */
+
+#define ATTR_CMN_ERROR         0x20000000
+
 /*
  * Structure describing the state of an in-progress attrlist operation.
  */
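
The FSOPT_ATTRLIST_EXTENDED option and the ATTR_CMN_GEN_COUNT /
ATTR_CMN_DOCUMENT_ID bits above are consumed from userspace through
getattrlist(2).  A minimal sketch of a caller follows; the SPI constants are
repeated locally on the assumption that they are not in the public headers of
this era, and the path is hypothetical:

	#include <sys/attr.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <stdint.h>

	#ifndef FSOPT_ATTRLIST_EXTENDED
	#define FSOPT_ATTRLIST_EXTENDED 0x00000020
	#endif
	#ifndef ATTR_CMN_GEN_COUNT
	#define ATTR_CMN_GEN_COUNT      0x00080000
	#endif
	#ifndef ATTR_CMN_DOCUMENT_ID
	#define ATTR_CMN_DOCUMENT_ID    0x00100000
	#endif

	struct docid_reply {
		uint32_t        length;        /* total size of returned data */
		attribute_set_t returned;      /* ATTR_CMN_RETURNED_ATTRS */
		uint32_t        gen_count;     /* ATTR_CMN_GEN_COUNT */
		uint32_t        document_id;   /* ATTR_CMN_DOCUMENT_ID */
	} __attribute__((packed));

	int
	main(void)
	{
		struct attrlist al = {
			.bitmapcount = ATTR_BIT_MAP_COUNT,
			.commonattr  = ATTR_CMN_RETURNED_ATTRS | ATTR_CMN_GEN_COUNT |
			               ATTR_CMN_DOCUMENT_ID,
		};
		struct docid_reply reply;

		/* FSOPT_PACK_INVAL_ATTRS keeps the buffer layout fixed even if
		 * the filesystem cannot supply one of the attributes. */
		if (getattrlist("/tmp/example-document.txt", &al, &reply, sizeof(reply),
		                FSOPT_ATTRLIST_EXTENDED | FSOPT_PACK_INVAL_ATTRS) != 0) {
			perror("getattrlist");
			return 1;
		}

		if (reply.returned.commonattr & ATTR_CMN_DOCUMENT_ID)
			printf("document_id = %u\n", reply.document_id);
		if (reply.returned.commonattr & ATTR_CMN_GEN_COUNT)
			printf("gen_count   = %u\n", reply.gen_count);
		return 0;
	}
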
@@ -117,7 +128,8 @@ attrlist_pack_fixed(struct _attrlist_buf *ab, void *source, ssize_t count)
  */
 static void
 attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t count, 
-               const void *ext, ssize_t extcount) {
+                       const void *ext, ssize_t extcount)
+{
 
        /* Use ssize_t's for pointer math ease */
        struct attrreference ar;
@@ -318,6 +330,7 @@ static struct getvolattrlist_attrtab getvolattrlist_common_tab[] = {
        {ATTR_CMN_FILEID,       0,                              sizeof(uint64_t)},
        {ATTR_CMN_PARENTID,     0,                              sizeof(uint64_t)},
        {ATTR_CMN_RETURNED_ATTRS, 0,                            sizeof(attribute_set_t)},
+       {ATTR_CMN_ERROR,        0,                              sizeof(uint32_t)},
        {0, 0, 0}
 };
 #define ATTR_CMN_VOL_INVALID \
@@ -338,7 +351,7 @@ static struct getvolattrlist_attrtab getvolattrlist_vol_tab[] = {
        {ATTR_VOL_DIRCOUNT,             VFSATTR_BIT(f_dircount),                        sizeof(uint32_t)},
        {ATTR_VOL_MAXOBJCOUNT,          VFSATTR_BIT(f_maxobjcount),                     sizeof(uint32_t)},
        {ATTR_VOL_MOUNTPOINT,           0,                                              sizeof(struct attrreference)},
-       {ATTR_VOL_NAME,                 VFSATTR_BIT(f_vol_name),                                sizeof(struct attrreference)},
+       {ATTR_VOL_NAME,                 VFSATTR_BIT(f_vol_name),                        sizeof(struct attrreference)},
        {ATTR_VOL_MOUNTFLAGS,           0,                                              sizeof(uint32_t)},
        {ATTR_VOL_MOUNTEDDEVICE,        0,                                              sizeof(struct attrreference)},
        {ATTR_VOL_ENCODINGSUSED,        0,                                              sizeof(uint64_t)},
@@ -478,14 +491,50 @@ static struct getattrlist_attrtab getattrlist_common_tab[] = {
        {ATTR_CMN_ACCESSMASK,   VATTR_BIT(va_mode),             sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_FLAGS,        VATTR_BIT(va_flags),            sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_USERACCESS,   0,                              sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
-       {ATTR_CMN_EXTENDED_SECURITY, VATTR_BIT(va_acl), sizeof(struct attrreference),           KAUTH_VNODE_READ_SECURITY},
+       {ATTR_CMN_EXTENDED_SECURITY, VATTR_BIT(va_acl),         sizeof(struct attrreference),   KAUTH_VNODE_READ_SECURITY},
        {ATTR_CMN_UUID,         VATTR_BIT(va_uuuid),            sizeof(guid_t),                 KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_GRPUUID,      VATTR_BIT(va_guuid),            sizeof(guid_t),                 KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_FILEID,       VATTR_BIT(va_fileid),           sizeof(uint64_t),               KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_PARENTID,     VATTR_BIT(va_parentid),         sizeof(uint64_t),               KAUTH_VNODE_READ_ATTRIBUTES},
-       {ATTR_CMN_FULLPATH, 0,  sizeof(struct attrreference),   KAUTH_VNODE_READ_ATTRIBUTES     },
-       {ATTR_CMN_ADDEDTIME, VATTR_BIT(va_addedtime), ATTR_TIME_SIZE,   KAUTH_VNODE_READ_ATTRIBUTES}, 
+       {ATTR_CMN_FULLPATH,     0,                              sizeof(struct attrreference),   KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_ADDEDTIME,    VATTR_BIT(va_addedtime),        ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
        {ATTR_CMN_RETURNED_ATTRS, 0,                            sizeof(attribute_set_t),        0},
+       {ATTR_CMN_ERROR,        0,                              sizeof(uint32_t),               0},
+       {0, 0, 0, 0}
+};
+
+static struct getattrlist_attrtab getattrlist_common_tab_extended[] = {
+       {ATTR_CMN_NAME,         VATTR_BIT(va_name),             sizeof(struct attrreference),   KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_DEVID,        0,                              sizeof(dev_t),                  KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_FSID,         VATTR_BIT(va_fsid),             sizeof(fsid_t),                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_OBJTYPE,      0,                              sizeof(fsobj_type_t),           KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_OBJTAG,       0,                              sizeof(fsobj_tag_t),            KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_OBJID,        VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_OBJPERMANENTID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_PAROBJID,     VATTR_BIT(va_parentid),         sizeof(fsobj_id_t),             KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_SCRIPT,       VATTR_BIT(va_encoding),         sizeof(text_encoding_t),        KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_CRTIME,       VATTR_BIT(va_create_time),      ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_MODTIME,      VATTR_BIT(va_modify_time),      ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_CHGTIME,      VATTR_BIT(va_change_time),      ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_ACCTIME,      VATTR_BIT(va_access_time),      ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_BKUPTIME,     VATTR_BIT(va_backup_time),      ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_FNDRINFO,     0,                              32,                             KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_OWNERID,      VATTR_BIT(va_uid),              sizeof(uid_t),                  KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_GRPID,        VATTR_BIT(va_gid),              sizeof(gid_t),                  KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_ACCESSMASK,   VATTR_BIT(va_mode),             sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_FLAGS,        VATTR_BIT(va_flags),            sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_GEN_COUNT,    VATTR_BIT(va_gen),              sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_DOCUMENT_ID,  VATTR_BIT(va_document_id),      sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_USERACCESS,   0,                              sizeof(uint32_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_EXTENDED_SECURITY, VATTR_BIT(va_acl),         sizeof(struct attrreference),   KAUTH_VNODE_READ_SECURITY},
+       {ATTR_CMN_UUID,         VATTR_BIT(va_uuuid),            sizeof(guid_t),                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_GRPUUID,      VATTR_BIT(va_guuid),            sizeof(guid_t),                 KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_FILEID,       VATTR_BIT(va_fileid),           sizeof(uint64_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_PARENTID,     VATTR_BIT(va_parentid),         sizeof(uint64_t),               KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_FULLPATH,     0,                              sizeof(struct attrreference),   KAUTH_VNODE_READ_ATTRIBUTES},
+       {ATTR_CMN_ADDEDTIME,    VATTR_BIT(va_addedtime),        ATTR_TIME_SIZE,                 KAUTH_VNODE_READ_ATTRIBUTES}, 
+       {ATTR_CMN_RETURNED_ATTRS, 0,                            sizeof(attribute_set_t),        0},
+       {ATTR_CMN_ERROR,        0,                              sizeof(uint32_t),               0},
        {0, 0, 0, 0}
 };
 
@@ -530,7 +579,10 @@ static struct getattrlist_attrtab getattrlist_file_tab[] = {
                                 ATTR_CMN_OWNERID  | ATTR_CMN_GRPID |  \
                                 ATTR_CMN_ACCESSMASK | ATTR_CMN_FLAGS |  \
                                 ATTR_CMN_USERACCESS | ATTR_CMN_FILEID | \
-                                ATTR_CMN_PARENTID | ATTR_CMN_RETURNED_ATTRS)
+                                ATTR_CMN_PARENTID | ATTR_CMN_RETURNED_ATTRS | \
+                                ATTR_CMN_DOCUMENT_ID | ATTR_CMN_GEN_COUNT)
+
+#define VFS_DFLT_ATTR_CMN_EXT  (ATTR_CMN_EXT_GEN_COUNT | ATTR_CMN_EXT_DOCUMENT_ID)
 
 #define VFS_DFLT_ATTR_DIR      (ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS)
 
@@ -578,17 +630,23 @@ getattrlist_parsetab(struct getattrlist_attrtab *tab, attrgroup_t attrs, struct
  * the data from a filesystem.
  */
 static int
-getattrlist_setupvattr(struct attrlist *alp, struct vnode_attr *vap, ssize_t *sizep, kauth_action_t *actionp, int is_64bit, int isdir)
+getattrlist_setupvattr(struct attrlist *alp, int attr_cmn_extended, struct vnode_attr *vap, ssize_t *sizep, kauth_action_t *actionp, int is_64bit, int isdir)
 {
        int     error;
+       struct getattrlist_attrtab *cmn_tab;
+
 
+       if (attr_cmn_extended)
+               cmn_tab = getattrlist_common_tab_extended;
+       else
+               cmn_tab = getattrlist_common_tab;
        /*
         * Parse the above tables.
         */
        *sizep = sizeof(uint32_t);      /* length count */
        *actionp = 0;
        if (alp->commonattr &&
-           (error = getattrlist_parsetab(getattrlist_common_tab, alp->commonattr, vap, sizep, actionp, is_64bit)) != 0)
+           (error = getattrlist_parsetab(cmn_tab, alp->commonattr, vap, sizep, actionp, is_64bit)) != 0)
                return(error);
        if (isdir && alp->dirattr &&
            (error = getattrlist_parsetab(getattrlist_dir_tab, alp->dirattr, vap, sizep, actionp, is_64bit)) != 0)
@@ -984,6 +1042,11 @@ getvolattrlist(vnode_t vp, struct getattrlist_args *uap, struct attrlist *alp,
                attrlist_pack_string(&ab, cnp, cnl);
                ab.actual.commonattr |= ATTR_CMN_NAME;
        }
+       if ((alp->commonattr & ATTR_CMN_ERROR) &&
+           (!return_valid || pack_invalid)) {
+               ATTR_PACK4(ab, 0);
+               ab.actual.commonattr |= ATTR_CMN_ERROR;
+       }
        if (alp->commonattr & ATTR_CMN_DEVID) {
                ATTR_PACK4(ab, mnt->mnt_vfsstat.f_fsid.val[0]);
                ab.actual.commonattr |= ATTR_CMN_DEVID;
@@ -1292,13 +1355,14 @@ getattrlist_internal(vnode_t vp, struct getattrlist_args *uap,
        ssize_t         fixedsize, varsize;
        const char      *cnp;
        const char      *vname = NULL;
-       char    *fullpathptr;
+       char            *fullpathptr;
        ssize_t         fullpathlen;
        ssize_t         cnl;
        int             proc_is64;
        int             error;
        int             return_valid;
        int             pack_invalid;
+       int             attr_extended;
        int             vtype = 0;
        uint32_t        perms = 0;
 
@@ -1347,8 +1411,9 @@ getattrlist_internal(vnode_t vp, struct getattrlist_args *uap,
        }
 
        /* Check for special packing semantics */
-       return_valid = (al.commonattr & ATTR_CMN_RETURNED_ATTRS);
-       pack_invalid = (uap->options & FSOPT_PACK_INVAL_ATTRS);
+       return_valid = (al.commonattr & ATTR_CMN_RETURNED_ATTRS) ? 1 : 0;
+       pack_invalid = (uap->options & FSOPT_PACK_INVAL_ATTRS) ? 1 : 0;
+       attr_extended = (uap->options & FSOPT_ATTRLIST_EXTENDED) ? 1 : 0;
        if (pack_invalid) {
                /* FSOPT_PACK_INVAL_ATTRS requires ATTR_CMN_RETURNED_ATTRS */
                if (!return_valid || al.forkattr) {
@@ -1370,7 +1435,7 @@ getattrlist_internal(vnode_t vp, struct getattrlist_args *uap,
        /*
         * Set up the vnode_attr structure and authorise.
         */
-       if ((error = getattrlist_setupvattr(&al, &va, &fixedsize, &action, proc_is64, (vtype == VDIR))) != 0) {
+       if ((error = getattrlist_setupvattr(&al, attr_extended, &va, &fixedsize, &action, proc_is64, (vtype == VDIR))) != 0) {
                VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: setup for request failed");
                goto out;
        }
@@ -1646,6 +1711,11 @@ getattrlist_internal(vnode_t vp, struct getattrlist_args *uap,
                attrlist_pack_string(&ab, cnp, cnl);
                ab.actual.commonattr |= ATTR_CMN_NAME;
        }
+       if ((al.commonattr & ATTR_CMN_ERROR) &&
+           (!return_valid || pack_invalid)) {
+               ATTR_PACK4(ab, 0);
+               ab.actual.commonattr |= ATTR_CMN_ERROR;
+       }
        if (al.commonattr & ATTR_CMN_DEVID) {
                ATTR_PACK4(ab, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
                ab.actual.commonattr |= ATTR_CMN_DEVID;
@@ -1806,6 +1876,25 @@ getattrlist_internal(vnode_t vp, struct getattrlist_args *uap,
                ATTR_PACK4(ab, va.va_flags);
                ab.actual.commonattr |= ATTR_CMN_FLAGS;
        }
+       if (attr_extended) {
+               if (al.commonattr & ATTR_CMN_GEN_COUNT) {
+                       if (VATTR_IS_SUPPORTED(&va, va_gen)) {
+                               ATTR_PACK4(ab, va.va_gen);
+                               ab.actual.commonattr |= ATTR_CMN_GEN_COUNT;
+                       } else if (!return_valid || pack_invalid) {
+                               ATTR_PACK4(ab, 0);
+                       }
+               }
+
+               if (al.commonattr & ATTR_CMN_DOCUMENT_ID) {
+                       if (VATTR_IS_SUPPORTED(&va, va_document_id)) {
+                               ATTR_PACK4(ab, va.va_document_id);
+                               ab.actual.commonattr |= ATTR_CMN_DOCUMENT_ID;
+                       } else if (!return_valid || pack_invalid) {
+                               ATTR_PACK4(ab, 0);
+                       }
+               }
+       }
        /* We already obtain the user access, so just fill in the buffer here */
        if (al.commonattr & ATTR_CMN_USERACCESS) {
 #if CONFIG_MACF
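From user space, the two extended common attributes packed above are requested through getattrlist(2). A minimal sketch, assuming the FSOPT_ATTRLIST_EXTENDED option bit tested by the kernel is visible to user space under that name (shipping headers may spell it differently) and that ATTR_CMN_GEN_COUNT and ATTR_CMN_DOCUMENT_ID are defined in <sys/attr.h>:

    #include <sys/attr.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(int argc, char *argv[])
    {
        struct attrlist al;
        uint32_t        buf[16];

        if (argc < 2)
            return 1;

        memset(&al, 0, sizeof(al));
        al.bitmapcount = ATTR_BIT_MAP_COUNT;
        al.commonattr  = ATTR_CMN_RETURNED_ATTRS |
                         ATTR_CMN_GEN_COUNT |
                         ATTR_CMN_DOCUMENT_ID;

        /* FSOPT_ATTRLIST_EXTENDED: name as used in the kernel above;
         * user-level headers may expose the bit under another name. */
        if (getattrlist(argv[1], &al, buf, sizeof(buf),
                        FSOPT_ATTRLIST_EXTENDED) != 0) {
            perror("getattrlist");
            return 1;
        }

        /* Packed layout for this request: a u_int32_t total length, the
         * returned-attributes set, then (only if the filesystem supplied
         * them) the generation count and the document id, each 32 bits. */
        attribute_set_t ret;
        uint32_t       *cursor = buf + 1;       /* skip the length word */

        memcpy(&ret, cursor, sizeof(ret));
        cursor += sizeof(ret) / sizeof(uint32_t);

        if (ret.commonattr & ATTR_CMN_GEN_COUNT)
            printf("gen count:   %u\n", *cursor++);
        if (ret.commonattr & ATTR_CMN_DOCUMENT_ID)
            printf("document id: %u\n", *cursor++);
        return 0;
    }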
index 5c816aad5ce1bfcd5287932cea258381f2fa9237..3044c8e431f5971330fb648942f9628d03c22656 100644 (file)
@@ -392,7 +392,7 @@ add_fsevent(int type, vfs_context_t ctx, ...)
     // (as long as it's not an event type that can never be the
     // same as a previous event)
     //
-    if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN) {
+    if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED) {
        void *ptr=NULL;
        int   vid=0, was_str=0, nlen=0;
 
@@ -563,6 +563,60 @@ add_fsevent(int type, vfs_context_t ctx, ...)
     //
     
     cur = kfse;
+
+    if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
+           uint64_t val;
+
+           //
+           // These events are unlike the other event types.  They carry only
+           // a dev_t, a src inode #, a dest inode #, and a doc-id.  We reuse
+           // the kfse fields that fit, but have to overlay the dest inode
+           // number and the doc-id onto other fields.
+           //
+
+           // First the dev_t
+           arg_type = va_arg(ap, int32_t);
+           if (arg_type == FSE_ARG_DEV) {
+                   cur->dev = (dev_t)(va_arg(ap, dev_t));
+           } else {
+                   cur->dev = (dev_t)0xbadc0de1;
+           }
+
+           // next the source inode #
+           arg_type = va_arg(ap, int32_t);
+           if (arg_type == FSE_ARG_INO) {
+                   cur->ino = (ino64_t)(va_arg(ap, ino64_t));
+           } else {
+                   cur->ino = 0xbadc0de2;
+           }
+
+           // now the dest inode #
+           arg_type = va_arg(ap, int32_t);
+           if (arg_type == FSE_ARG_INO) {
+                   val = (ino64_t)(va_arg(ap, ino64_t));
+           } else {
+                   val = 0xbadc0de2;
+           }
+           // overlay the dest inode number on the str/dest pointer fields
+           memcpy(&cur->str, &val, sizeof(ino64_t));
+
+           // and last the document-id
+           arg_type = va_arg(ap, int32_t);
+           if (arg_type == FSE_ARG_INT32) {
+                   val = (uint64_t)va_arg(ap, uint32_t);
+           } else if (arg_type == FSE_ARG_INT64) {
+                   val = (uint64_t)va_arg(ap, uint64_t);
+           } else {
+                   val = 0xbadc0de3;
+           }
+           
+           // the docid is 64-bit and overlays the uid/gid fields
+           memcpy(&cur->uid, &val, sizeof(uint64_t));
+
+           goto done_with_args;
+    }
+
     for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t))
 
        switch(arg_type) {
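The overlay described above stores 64-bit values across kfse fields that normally hold other data, including the adjacent uid/gid pair. A minimal standalone sketch of the same packing technique, using a hypothetical struct in place of kfs_event:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the relevant kfs_event fields; assumes the
     * two 32-bit fields are adjacent with no padding, as in the kernel. */
    struct ev {
        uint32_t uid;
        uint32_t gid;
    };

    static void pack_docid(struct ev *e, uint64_t docid)
    {
        /* memcpy sidesteps the alignment and aliasing problems a pointer
         * cast could cause; the value simply spans both fields. */
        memcpy(&e->uid, &docid, sizeof(docid));
    }

    static uint64_t unpack_docid(const struct ev *e)
    {
        uint64_t docid;
        memcpy(&docid, &e->uid, sizeof(docid));
        return docid;
    }

    int main(void)
    {
        struct ev e;
        pack_docid(&e, 0x1122334455667788ULL);
        printf("docid 0x%llx\n", (unsigned long long)unpack_docid(&e));
        return 0;
    }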
@@ -687,12 +741,19 @@ add_fsevent(int type, vfs_context_t ctx, ...)
                }
                break;
 
+           case FSE_ARG_INT32: {
+                   uint32_t ival = (uint32_t)va_arg(ap, int32_t);
+                   kfse->uid = (ino64_t)ival;
+                   break;
+           }
+                   
            default:
                printf("add_fsevent: unknown type %d\n", arg_type);
                // just skip one 32-bit word and hope we sync up...
                (void)va_arg(ap, int32_t);
        }
 
+done_with_args:
     va_end(ap);
 
     OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
@@ -835,7 +896,7 @@ release_event_ref(kfs_event *kfse)
     unlock_fs_event_list();
     
     // if we have a pointer in the union
-    if (copy.str) {
+    if (copy.str && copy.type != FSE_DOCID_CHANGED) {
        if (copy.len == 0) {    // and it's not a string
            panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
            // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
@@ -1248,6 +1309,36 @@ copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
 
   copy_again:
 
+    if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
+       dev_t    dev  = cur->dev;
+       ino_t    ino  = cur->ino;
+       uint64_t ival;
+
+       error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+       if (error != 0) {
+           goto get_out;
+       }
+
+       error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+       if (error != 0) {
+           goto get_out;
+       }
+
+       memcpy(&ino, &cur->str, sizeof(ino_t));
+       error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+       if (error != 0) {
+           goto get_out;
+       }
+
+       memcpy(&ival, &cur->uid, sizeof(uint64_t));   // the 64-bit docid overlays the uid/gid fields
+       error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
+       if (error != 0) {
+           goto get_out;
+       }
+
+       goto done;
+    }
+
     if (cur->str == NULL || cur->str[0] == '\0') {
        printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
        error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
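On the consumer side, the four records written above arrive in order: dev, source inode, destination inode, and the 64-bit document id. A hypothetical decoder for just this event shape, assuming fill_buff()'s framing of a 16-bit argument tag followed by a 16-bit payload length, on a little-endian host:

    #include <stdint.h>
    #include <string.h>

    /* One parsed argument record from the fsevents stream; real tag values
     * come from <sys/fsevents.h>, and the framing is assumed as above. */
    struct fse_arg {
        uint16_t       tag;
        uint16_t       len;
        const uint8_t *payload;
    };

    /* Read a 4- or 8-byte little-endian integer payload. */
    static uint64_t arg_uint(const struct fse_arg *a)
    {
        uint64_t v = 0;
        memcpy(&v, a->payload, a->len < sizeof(v) ? a->len : sizeof(v));
        return v;
    }

    /* Decode the dev / src ino / dest ino / doc-id sequence emitted for
     * FSE_DOCID_CREATED and FSE_DOCID_CHANGED events. */
    static void decode_docid_event(const struct fse_arg a[4],
                                   uint64_t *dev, uint64_t *src_ino,
                                   uint64_t *dst_ino, uint64_t *docid)
    {
        *dev     = arg_uint(&a[0]);
        *src_ino = arg_uint(&a[1]);
        *dst_ino = arg_uint(&a[2]);
        *docid   = arg_uint(&a[3]);
    }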
@@ -1438,7 +1529,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
 
        if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
 
-         if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) & is_ignored_directory(kfse->str)) {
+         if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
            // If this is not an Apple System Service, skip specified directories
            // radar://12034844
            error = 0;
index 453ceb629b099ae08b0a23f999d0b4fc9e107e66..394f474c1402d81bb73aa67a72ae32e39eb11d13 100644 (file)
@@ -1,4 +1,4 @@
-13.1.0
+13.2.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
index 3fe0984dba2d9ca6fb70f193b1d185d46d880000..7138ea5c20c8e7f1b76f9b098fecd37c9a5ec7b0 100644 (file)
@@ -617,7 +617,7 @@ private:
     unsigned int            ignoreTellChangeDown    :1;
     unsigned int            wranglerAsleep          :1;
     unsigned int            wranglerTickled         :1;
-    unsigned int            ignoreIdleSleepTimer    :1;
+    unsigned int            _preventUserActive      :1;
     unsigned int            graphicsSuppressed      :1;
 
     unsigned int            capabilityLoss          :1;
@@ -780,6 +780,8 @@ private:
 
     bool        latchDisplayWranglerTickle( bool latch );
     void        setDisplayPowerOn( uint32_t options );
+    void        systemDidNotSleep( void );
+    void        preventTransitionToUserActive( bool prevent );
 #endif /* XNU_KERNEL_PRIVATE */
 };
 
index 73963e117d7cb4b6af45f2b9334a4853faac2be1..3bd133ebac90398348579beafb3e70f45c899272 100644 (file)
@@ -227,7 +227,7 @@ static IOReturn IOHibernateDone(IOHibernateVars * vars);
 
 enum { kXPRamAudioVolume = 8 };
 enum { kDefaultIOSize = 128 * 1024 };
-enum { kVideoMapSize  = 32 * 1024 * 1024 };
+enum { kVideoMapSize  = 80 * 1024 * 1024 };
 
 #ifndef kIOMediaPreferredBlockSizeKey
 #define kIOMediaPreferredBlockSizeKey  "Preferred Block Size"
@@ -2927,9 +2927,13 @@ hibernate_machine_init(void)
     {
         vars->videoMapSize = round_page(gIOHibernateGraphicsInfo->height 
                                         * gIOHibernateGraphicsInfo->rowBytes);
-        IOMapPages(kernel_map, 
-                    vars->videoMapping, gIOHibernateGraphicsInfo->physicalAddress,
-                    vars->videoMapSize, kIOMapInhibitCache );
+       if (vars->videoMapSize > vars->videoAllocSize) vars->videoMapSize = 0;
+       else
+       {
+           IOMapPages(kernel_map, 
+                       vars->videoMapping, gIOHibernateGraphicsInfo->physicalAddress,
+                       vars->videoMapSize, kIOMapInhibitCache );
+       }
     }
 
     if (vars->videoMapSize)
index 45504157aa469ccacd400731e9c7a611a54d7b3f..1c39602d7b6f1b6ac0fe8097befb71be9ac81e2f 100644 (file)
@@ -119,9 +119,15 @@ iopa_t gIOBMDPageAllocator;
 static queue_head_t gIOMallocContiguousEntries;
 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
 
-enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 96 * 1024 * 1024 };
+#if __x86_64__
+enum { kIOMaxPageableMaps    = 8 };
+enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
+#else
+enum { kIOMaxPageableMaps    = 16 };
+enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
+#endif
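Assuming the per-map size acts as the effective cap, the new x86_64 constants roughly raise the total pageable capacity available to the IOKit pageable allocator:

    old: kIOMaxPageableMaps * kIOPageableMapSize = 16 *  96 MB = 1536 MB
    new: kIOMaxPageableMaps * kIOPageableMapSize =  8 * 512 MB = 4096 MB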
 
 typedef struct {
     vm_map_t           map;
index 06c584b75066ffdb0fd243d90cfe0cd27fccb21a..60f715588df80a1cadccc4adf36d48bf15e70e84 100644 (file)
@@ -156,8 +156,8 @@ enum {
     kStimulusDarkWakeReentry,           // 7
     kStimulusDarkWakeEvaluate,          // 8
     kStimulusNoIdleSleepPreventers,     // 9
-    kStimulusUserIsActive,              // 10
-    kStimulusUserIsInactive             // 11
+    kStimulusEnterUserActiveState,      // 10
+    kStimulusLeaveUserActiveState       // 11
 };
 
 extern "C" {
@@ -955,7 +955,8 @@ bool IOPMrootDomain::start( IOService * nub )
     acAdaptorConnected = true;
     clamshellSleepDisabled = false;
 
-    // User active state at boot
+    // Initialize to user active.
+    // Will never transition to user inactive w/o wrangler.
     fullWakeReason = kFullWakeReasonLocalUser;
     userIsActive = userWasActive = true;
     setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue);
@@ -2654,6 +2655,36 @@ void IOPMrootDomain::askChangeDownDone(
     }
 }
 
+//******************************************************************************
+// systemDidNotSleep
+//
+// Work common to both canceled or aborted sleep.
+//******************************************************************************
+
+void IOPMrootDomain::systemDidNotSleep( void )
+{
+    if (!wrangler)
+    {
+        if (idleSeconds)
+        {
+            // stay awake for at least idleSeconds
+            startIdleSleepTimer(idleSeconds);
+        }
+    }
+    else
+    {
+        if (sleepSlider && !userIsActive)
+        {
+            // Manually start the idle sleep timer rather than waiting
+            // for the user to become inactive.
+            startIdleSleepTimer( kIdleSleepRetryInterval );
+        }
+    }
+
+    preventTransitionToUserActive(false);
+    IOService::setAdvisoryTickleEnable( true );
+}
+
 //******************************************************************************
 // tellNoChangeDown
 //
@@ -2674,24 +2705,7 @@ void IOPMrootDomain::tellNoChangeDown( unsigned long stateNum )
        // Sleep canceled, clear the sleep trace point.
     tracePoint(kIOPMTracePointSystemUp);
 
-    if (!wrangler)
-    {
-        if (idleSeconds)
-        {
-            // stay awake for at least idleSeconds
-            startIdleSleepTimer(idleSeconds);
-        }
-    }
-    else if (sleepSlider && !userIsActive)
-    {
-        // Display wrangler is already asleep, it won't trigger the next
-        // idle sleep attempt. Schedule a future idle sleep attempt, and
-        // also push out the next idle sleep attempt.
-
-        startIdleSleepTimer( kIdleSleepRetryInterval );
-    }
-    
-    IOService::setAdvisoryTickleEnable( true );
+    systemDidNotSleep();
     return tellClients( kIOMessageSystemWillNotSleep );
 }
 
@@ -2727,19 +2741,10 @@ void IOPMrootDomain::tellChangeUp( unsigned long stateNum )
         if (getPowerState() == ON_STATE)
         {
             // this is a quick wake from aborted sleep
-            ignoreIdleSleepTimer = false;
-            if (idleSeconds && !wrangler)
-            {
-                // stay awake for at least idleSeconds
-                startIdleSleepTimer(idleSeconds);
-            }
-            IOService::setAdvisoryTickleEnable( true );
+            systemDidNotSleep();
             tellClients( kIOMessageSystemWillPowerOn );
         }
 
-        tracePoint( kIOPMTracePointWakeApplications );
-
-
 #if defined(__i386__) || defined(__x86_64__)
         if (spindumpDesc)
         {
@@ -2749,6 +2754,7 @@ void IOPMrootDomain::tellChangeUp( unsigned long stateNum )
         }
 #endif
 
+        tracePoint( kIOPMTracePointWakeApplications );
         tellClients( kIOMessageSystemHasPoweredOn );
     }
 }
@@ -4722,8 +4728,14 @@ void IOPMrootDomain::handleOurPowerChangeStart(
             *inOutChangeFlags |= kIOPMSyncTellPowerDown;
             _systemMessageClientMask = kSystemMessageClientPowerd |
                                        kSystemMessageClientLegacyApp;
+
+            // rdar://15971327
+            // Prevent user active transitions before notifying clients
+            // that system will sleep.
+            preventTransitionToUserActive(true);
+
             IOService::setAdvisoryTickleEnable( false );
-        
+
             // Publish the sleep reason for full to dark wake
             publishSleepReason = true;
             lastSleepReason = fullToDarkReason = sleepReason;
@@ -4757,21 +4769,12 @@ void IOPMrootDomain::handleOurPowerChangeStart(
             timeline->setSleepCycleInProgressFlag(true);
 
         recordPMEvent(kIOPMEventTypeSleep, NULL, sleepReason, kIOReturnSuccess);
-
-        // Optimization to ignore wrangler power down thus skipping
-        // the disk spindown and arming the idle timer for demand sleep.
-
-        if (changeFlags & kIOPMIgnoreChildren)
-        {
-            ignoreIdleSleepTimer = true;
-        }
     }
 
     // 3. System wake.
 
     else if (kSystemTransitionWake == _systemTransitionType)
     {
-        ignoreIdleSleepTimer = false;
         tracePoint( kIOPMTracePointWakeWillPowerOnClients );
         if (pmStatsAppResponses)
         {
@@ -5162,6 +5165,7 @@ void IOPMrootDomain::handleActivityTickleForDisplayWrangler(
     IOService *     service,
     IOPMActions *   actions )
 {
+#if !NO_KERNEL_HID
     // Warning: Not running in PM work loop context - don't modify state !!!
     // Trap tickle directed to IODisplayWrangler while running with graphics
     // capability suppressed.
@@ -5191,6 +5195,7 @@ void IOPMrootDomain::handleActivityTickleForDisplayWrangler(
                 (void *) kStimulusDarkWakeActivityTickle );
         }
     }
+#endif
 }
 
 void IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler(
@@ -5200,21 +5205,24 @@ void IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler(
     IOPMPowerStateIndex     oldPowerState,
     IOPMPowerStateIndex     newPowerState )
 {
+#if !NO_KERNEL_HID
     assert(service == wrangler);
     
-    // This function implements half of the user activity detection.
-    // User is active if:
-    // 1. DeviceDesire increases to max,
-    //    and wrangler already in max power state
-    //    (no power state change, caught by this routine)
+    // This function implements half of the user active detection
+    // by monitoring changes to the display wrangler's device desire.
     //
-    // 2. Power change to max, and DeviceDesire is at max.
-    //    (wrangler must reset DeviceDesire before system sleep)
+    // User becomes active when either:
+    // 1. Wrangler's DeviceDesire increases to max, but wrangler is already
+    //    in max power state. This desire change, in the absence of a power
+    //    state change, is detected here. This handles the case where the user
+    //    becomes active while the display is already lit by setDisplayPowerOn().
     //
-    // User is inactive if:
-    // 1. DeviceDesire drops to sleep state or below
+    // 2. Power state change to max, and DeviceDesire is also at max.
+    //    Handled by displayWranglerNotification().
+    //
+    // User becomes inactive when DeviceDesire drops to sleep state or below.
 
-    DLOG("wrangler %s (%u, %u->%u)\n",
+    DLOG("wrangler %s (ps %u, %u->%u)\n",
         powerClient->getCStringNoCopy(),
         (uint32_t) service->getPowerState(),
         (uint32_t) oldPowerState, (uint32_t) newPowerState);
@@ -5225,15 +5233,41 @@ void IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler(
             (newPowerState == kWranglerPowerStateMax) &&
             (service->getPowerState() == kWranglerPowerStateMax))
         {
-            evaluatePolicy( kStimulusUserIsActive );
+            evaluatePolicy( kStimulusEnterUserActiveState );
         }
         else
         if ((newPowerState < oldPowerState) &&
             (newPowerState <= kWranglerPowerStateSleep))
         {
-            evaluatePolicy( kStimulusUserIsInactive );
+            evaluatePolicy( kStimulusLeaveUserActiveState );
         }
     }
+#endif
+}
+
+//******************************************************************************
+// User active state management
+//******************************************************************************
+
+void IOPMrootDomain::preventTransitionToUserActive( bool prevent )
+{
+#if !NO_KERNEL_HID
+    _preventUserActive = prevent;
+    if (wrangler && !_preventUserActive)
+    {
+        // Allowing transition to user active, but the wrangler may have
+        // already powered ON in case of sleep cancel/revert. Poll the
+        // same conditions checked for in displayWranglerNotification()
+        // to bring the user active state up to date.
+
+        if ((wrangler->getPowerState() == kWranglerPowerStateMax) &&
+            (wrangler->getPowerStateForClient(gIOPMPowerClientDevice) ==
+             kWranglerPowerStateMax))
+        {
+            evaluatePolicy( kStimulusEnterUserActiveState );
+        }
+    }
+#endif
 }
 
 //******************************************************************************
@@ -5531,8 +5565,8 @@ IOReturn IOPMrootDomain::displayWranglerNotification(
         return kIOReturnUnsupported;
 
     displayPowerState = params->stateNumber;
-    DLOG("DisplayWrangler message 0x%x, power state %d\n",
-         (uint32_t) messageType, displayPowerState);
+    DLOG("wrangler %s ps %d\n",
+         getIOMessageString(messageType), displayPowerState);
 
     switch (messageType) {
        case kIOMessageDeviceWillPowerOff:
@@ -5561,7 +5595,7 @@ IOReturn IOPMrootDomain::displayWranglerNotification(
                 if (service->getPowerStateForClient(gIOPMPowerClientDevice) ==
                     kWranglerPowerStateMax)
                 {
-                    gRootDomain->evaluatePolicy( kStimulusUserIsActive );
+                    gRootDomain->evaluatePolicy( kStimulusEnterUserActiveState );
                 }
             }
             break;
@@ -6431,12 +6465,17 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg )
             wranglerAsleep = false;
             break;
 
-        case kStimulusUserIsActive:
+        case kStimulusEnterUserActiveState:
+            if (_preventUserActive)
+            {
+                DLOG("user active dropped\n");
+                break;
+            }
             if (!userIsActive)
             {
                 userIsActive = true;
                 userWasActive = true;
-                
+
                 // Stay awake after dropping demand for display power on
                 if (kFullWakeReasonDisplayOn == fullWakeReason)
                     fullWakeReason = fFullWakeReasonDisplayOnAndLocalUser;
@@ -6447,7 +6486,7 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg )
             flags.bit.idleSleepDisabled = true;
             break;
 
-        case kStimulusUserIsInactive:
+        case kStimulusLeaveUserActiveState:
             if (userIsActive)
             {
                 userIsActive = false;
@@ -6664,7 +6703,7 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg )
             DLOG("user inactive\n");        
         }
 
-        if (!userIsActive && !ignoreIdleSleepTimer && sleepSlider)
+        if (!userIsActive && sleepSlider)
         {
             startIdleSleepTimer(getTimeToIdleSleep());
         }
@@ -6825,9 +6864,8 @@ void IOPMrootDomain::requestFullWake( FullWakeReason reason )
 void IOPMrootDomain::willEnterFullWake( void )
 {
     hibernateRetry = false;
-    ignoreIdleSleepTimer = false;
-    sleepTimerMaintenance = false;
     sleepToStandby = false;
+    sleepTimerMaintenance = false;
 
     _systemMessageClientMask = kSystemMessageClientPowerd |
                                kSystemMessageClientLegacyApp;
@@ -6845,6 +6883,7 @@ void IOPMrootDomain::willEnterFullWake( void )
 
     IOService::setAdvisoryTickleEnable( true );
     tellClients(kIOMessageSystemWillPowerOn);
+    preventTransitionToUserActive(false);
 }
 
 //******************************************************************************
index 494f1916b08bfc13a7397c9d669fed007caa4bd8..bbbea1e39ad4465fcc4f6be222080e653909b5ba 100644 (file)
@@ -1352,7 +1352,6 @@ IOPlatformExpertDevice::initWithArgs(
                             void * dtTop, void * p2, void * p3, void * p4 )
 {
     IORegistryEntry *  dt = 0;
-    void *             argsData[ 4 ];
     bool               ok;
 
     // dtTop may be zero on non- device tree systems
@@ -1369,13 +1368,6 @@ IOPlatformExpertDevice::initWithArgs(
     if (!workLoop)
         return false;
 
-    argsData[ 0 ] = dtTop;
-    argsData[ 1 ] = p2;
-    argsData[ 2 ] = p3;
-    argsData[ 3 ] = p4;
-
-    setProperty("IOPlatformArgs", (void *)argsData, sizeof(argsData));
-
     return( true);
 }
 
index 3ae21d60fbeac08c15634d1ad2a99c86fbeffad5..9ba2e752ca32e3851e545f02898afc5e035375ad 100644 (file)
@@ -3578,6 +3578,8 @@ void IOService::notifyRootDomain( void )
     MS_PUSH(fMachineState);  // push notifyAll() machine state
     fMachineState = kIOPM_DriverThreadCallDone;
 
+    // Call IOPMrootDomain::willNotifyPowerChildren() on a thread call
+    // to avoid a deadlock.
     fDriverCallReason = kRootDomainInformPreChange;
     fDriverCallBusy   = true;
     thread_call_enter( fDriverCallEntry );
@@ -3639,7 +3641,7 @@ void IOService::notifyChildren( void )
                 // Cannot be used together with strict tree ordering.
 
                 if (!fIsPreChange &&
-                    (connection->delayChildNotification) &&
+                    connection->delayChildNotification &&
                     getPMRootDomain()->shouldDelayChildNotification(this))
                 {
                     if (!children)
@@ -3677,15 +3679,23 @@ void IOService::notifyChildren( void )
 
         if (delayNotify)
         {
-            // Wait for exiting child notifications to complete,
-            // before notifying the children in the array.
+            // Block until all non-delayed children have acked their
+            // notification. Then notify the remaining delayed children
+            // in the array. This is used to hold off graphics child
+            // notification while the rest of the system powers up.
+            // If a HID tickle arrives during this time, the delayed
+            // children are notified immediately and root domain will
+            // not clamp power for dark wake.
+
             fMachineState = kIOPM_NotifyChildrenDelayed;
             PM_LOG2("%s: %d children in delayed array\n",
                 getName(), children->getCount());
         }
         else
         {
+            // Child array created to support strict notification order.
             // Notify children in the array one at a time.
+
             fMachineState = kIOPM_NotifyChildrenOrdered;
         }
        }
@@ -3739,8 +3749,9 @@ void IOService::notifyChildrenDelayed( void )
        assert(fMachineState == kIOPM_NotifyChildrenDelayed);
 
     // Wait after all non-delayed children and interested drivers have ack'ed,
-    // then notify all delayed children. When explicitly cancelled, interest
-    // acks (and ack timer) may still be outstanding.
+    // then notify all delayed children. If notify delay is canceled, child
+    // acks may be outstanding with PM blocked on fHeadNotePendingAcks != 0.
+    // But the handling for either case is identical.
 
     for (int i = 0; ; i++)
     {
index cc584a9a11b85a3b198788c2da2129031e798d1f..7994bfbd7a90eaa4224f41a6aa112251f2171618 100644 (file)
@@ -454,7 +454,7 @@ pmap_enter_options(
        /* 2MiB mappings are confined to x86_64 by VM */
        boolean_t               superpage = flags & VM_MEM_SUPERPAGE;
        vm_object_t             delpage_pm_obj = NULL;
-       int                     delpage_pde_index = 0;
+       uint64_t                delpage_pde_index = 0;
        pt_entry_t              old_pte;
        kern_return_t           kr_expand;
 
@@ -538,7 +538,7 @@ Retry:
                 * Remember the PDE and the PDE index, so that we
                 * can free the page at the end of this function.
                 */
-               delpage_pde_index = (int)pdeidx(pmap, vaddr);
+               delpage_pde_index = pdeidx(pmap, vaddr);
                delpage_pm_obj = pmap->pm_obj;
                *pte = 0;
        }
@@ -906,7 +906,7 @@ Done:
                vm_page_t m;
 
                vm_object_lock(delpage_pm_obj);
-               m = vm_page_lookup(delpage_pm_obj, delpage_pde_index);
+               m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE));
                if (m == VM_PAGE_NULL)
                    panic("pmap_enter: pte page not in object");
                vm_object_unlock(delpage_pm_obj);
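vm_page_lookup() takes a vm_object_offset_t, i.e. a byte offset into the VM object rather than a raw page-table index, which is presumably why the stored index is widened to 64 bits and scaled at lookup time:

    lookup offset = delpage_pde_index * PAGE_SIZE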
index 45f3dbeb7e25e2cf4b3c3c38a7283155c4816c68..5acb12320c70de476df7b15de5bcf7b6fae515c2 100644 (file)
@@ -192,7 +192,7 @@ void compressor_pager_slots_chunk_free(compressor_slot_t *chunk, int num_slots);
 void compressor_pager_slot_lookup(
        compressor_pager_t      pager,
        boolean_t               do_alloc,
-       uint32_t                offset,
+       memory_object_offset_t  offset,
        compressor_slot_t       **slot_pp);
 
 kern_return_t
@@ -402,7 +402,11 @@ compressor_memory_object_data_request(
        if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0)
                panic("compressor_memory_object_data_request(): bad alignment");
 
-       assert((uint32_t) offset == offset);
+       if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               return KERN_FAILURE;
+       }
 
        compressor_pager_lookup(mem_obj, pager);
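The (uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE) test recurs throughout this file; it catches offsets whose page number no longer fits the pager's 32-bit slot index. A small hypothetical helper expressing the same check, assuming 4 KiB pages:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE_4K 4096ULL    /* assumption: 4 KiB pages */

    /* True when the offset's page number exceeds 32 bits, i.e. the offset
     * lies at or beyond 2^32 pages (16 TB with 4 KiB pages). */
    static bool page_num_overflows(uint64_t offset)
    {
        uint64_t page_num = offset / PAGE_SIZE_4K;
        return (uint32_t)page_num != page_num;
    }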
 
@@ -413,7 +417,7 @@ compressor_memory_object_data_request(
        }
 
        /* find the compressor slot for that page */
-       compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
+       compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
 
        if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
                /* out of range */
@@ -503,14 +507,16 @@ compressor_memory_object_data_return(
  */
 kern_return_t
 compressor_memory_object_create(
-       vm_size_t               new_size,
+       memory_object_size_t    new_size,
        memory_object_t         *new_mem_obj)
 {
        compressor_pager_t      pager;
        int                     num_chunks;
 
-       if ((uint32_t) new_size != new_size) {
-               /* 32-bit overflow */
+       if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) {
+               /* 32-bit overflow for number of pages */
+               panic("%s: size 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) new_size);
                return KERN_INVALID_ARGUMENT;
        }
 
@@ -522,7 +528,7 @@ compressor_memory_object_create(
        compressor_pager_lock_init(pager);
        pager->cpgr_control = MEMORY_OBJECT_CONTROL_NULL;
        pager->cpgr_references = 1;
-       pager->cpgr_num_slots = (uint32_t) (new_size / PAGE_SIZE);
+       pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE);
 
        num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;
        if (num_chunks > 1) {
@@ -567,7 +573,7 @@ void
 compressor_pager_slot_lookup(
        compressor_pager_t      pager,
        boolean_t               do_alloc,
-       uint32_t                offset,
+       memory_object_offset_t  offset,
        compressor_slot_t       **slot_pp)
 {
        int                     num_chunks;
@@ -577,7 +583,14 @@ compressor_pager_slot_lookup(
        compressor_slot_t       *chunk;
        compressor_slot_t       *t_chunk;
 
-       page_num = offset / PAGE_SIZE;
+       page_num = (uint32_t)(offset/PAGE_SIZE);
+       if (page_num != (offset/PAGE_SIZE)) {
+               /* overflow */
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               *slot_pp = NULL;
+               return;
+       }
        if (page_num > pager->cpgr_num_slots) {
                /* out of range */
                *slot_pp = NULL;
@@ -655,9 +668,14 @@ vm_compressor_pager_put(
 
        compressor_pager_lookup(mem_obj, pager);
 
-       assert((upl_offset_t) offset == offset);
+       if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
+               /* overflow */
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               return KERN_RESOURCE_SHORTAGE;
+       }
 
-       compressor_pager_slot_lookup(pager, TRUE, (uint32_t) offset, &slot_p);
+       compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p);
 
        if (slot_p == NULL) {
                /* out of range ? */
@@ -694,12 +712,16 @@ vm_compressor_pager_get(
        
        compressor_pager_stats.data_requests++;
 
-       assert((uint32_t) offset == offset);
+       if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               return KERN_MEMORY_ERROR;
+       }
 
        compressor_pager_lookup(mem_obj, pager);
 
        /* find the compressor slot for that page */
-       compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
+       compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
 
        if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
                /* out of range */
@@ -739,12 +761,17 @@ vm_compressor_pager_state_clr(
        
        compressor_pager_stats.state_clr++;
 
-       assert((uint32_t) offset == offset);
+       if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
+               /* overflow */
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               return;
+       }
 
        compressor_pager_lookup(mem_obj, pager);
 
        /* find the compressor slot for that page */
-       compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
+       compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
 
        if (slot_p && *slot_p != 0) {
                vm_compressor_free(slot_p);
@@ -761,12 +788,17 @@ vm_compressor_pager_state_get(
        
        compressor_pager_stats.state_get++;
 
-       assert((uint32_t) offset == offset);
+       if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) {
+               /* overflow */
+               panic("%s: offset 0x%llx overflow\n",
+                     __FUNCTION__, (uint64_t) offset);
+               return VM_EXTERNAL_STATE_ABSENT;
+       }
 
        compressor_pager_lookup(mem_obj, pager);
 
        /* find the compressor slot for that page */
-       compressor_pager_slot_lookup(pager, FALSE, (uint32_t) offset, &slot_p);
+       compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p);
 
        if (offset / PAGE_SIZE > pager->cpgr_num_slots) {
                /* out of range */
index 315ad33874e55bdeda4811386791ab57504dd882..a30ff18e11125f20b58066fa8055af5db71d6133 100644 (file)
@@ -2423,6 +2423,10 @@ vm_map_enter_mem_object(
                if ((named_entry->protection & cur_protection) !=
                    cur_protection)
                        return KERN_INVALID_RIGHT;
+               if (offset + size < offset) {
+                       /* overflow */
+                       return KERN_INVALID_ARGUMENT;
+               }
                if (named_entry->size < (offset + size))
                        return KERN_INVALID_ARGUMENT;
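The added offset + size < offset test is the usual unsigned wrap-around check: if the sum overflows, it compares less than either operand. For example, with 64-bit arithmetic an offset of 0xFFFFFFFFFFFFF000 plus a size of 0x2000 wraps to 0x1000, which is less than the offset, so the request is rejected before the named_entry->size comparison below could be satisfied by a wrapped sum.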
 
index 9044bbe0636bf0009efa7a7cbfc8554732a255b7..a16857ec00750e91161bdcf31d56b25669ad1ed4 100644 (file)
@@ -4565,9 +4565,13 @@ vm_object_compressor_pager_create(
                
        vm_object_unlock(object);
 
-       if ((uint32_t) object->vo_size != object->vo_size) {
-               panic("vm_object_compressor_pager_create(): object size 0x%llx >= 4GB\n",
-                     (uint64_t) object->vo_size);
+       if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
+           (object->vo_size/PAGE_SIZE)) {
+               panic("vm_object_compressor_pager_create(%p): "
+                     "object size 0x%llx >= 0x%llx\n",
+                     object,
+                     (uint64_t) object->vo_size,
+                     0x0FFFFFFFFULL*PAGE_SIZE);
        }
 
        /*
@@ -4581,10 +4585,16 @@ vm_object_compressor_pager_create(
                assert(object->temporary);
 
                /* create our new memory object */
-               assert((vm_size_t) object->vo_size == object->vo_size);
+               assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
+                      (object->vo_size/PAGE_SIZE));
                (void) compressor_memory_object_create(
-                       (vm_size_t) object->vo_size,
+                       (memory_object_size_t) object->vo_size,
                        &pager);
+               if (pager == NULL) {
+                       panic("vm_object_compressor_pager_create(): "
+                             "no pager for object %p size 0x%llx\n",
+                             object, (uint64_t) object->vo_size);
+               }
        }
 
        entry = vm_object_hash_entry_alloc(pager);
index 0033a2890c33012640b1ebe741de33cae903b252..0f814c3e94e18765c394928e75078efb65acd88d 100644 (file)
@@ -501,7 +501,7 @@ int vm_toggle_entry_reuse(int, int*);
 
 extern void vm_compressor_pager_init(void);
 extern kern_return_t compressor_memory_object_create(
-       vm_size_t,
+       memory_object_size_t,
        memory_object_t *);
 
 /* the object purger. purges the next eligible object from memory. */
index 9467e4d224f34b0f67f582435bfcc1d1e5962a12..e22ce034ebf7d32e7dc2757423ae3c1dfdee52b9 100644 (file)
@@ -57,6 +57,16 @@ int shm_tests( void * the_argp )
                goto test_failed_exit;
        }
 
+       if (my_shmid_ds.shm_internal != (void *) 0){
+               /*
+                * The shm_internal field is a pointer reserved for kernel
+                * use only.  It should not be leaked to user space.
+                * (PR-15642873)
+                */
+               printf( "shmctl failed to sanitize kernel internal pointer \n" );
+               goto test_failed_exit;
+       }
+
        my_err = shmdt( my_shm_addr );
        if ( my_err == -1 ) {
                printf( "shmdt failed with error %d - \"%s\" \n", errno, strerror( errno) );