git.saurik.com Git - apple/xnu.git/commitdiff
xnu-2050.18.24.tar.gz (tags: mac-os-x-1082, v2050.18.24)
author Apple <opensource@apple.com>
Wed, 19 Sep 2012 18:37:21 +0000 (18:37 +0000)
committer Apple <opensource@apple.com>
Wed, 19 Sep 2012 18:37:21 +0000 (18:37 +0000)
52 files changed:
bsd/hfs/hfs_vfsops.c
bsd/hfs/hfs_vnops.c
bsd/kern/kern_event.c
bsd/kern/kern_exit.c
bsd/kern/kern_lockf.c
bsd/kern/uipc_mbuf.c
bsd/netinet/tcp_lro.c
bsd/netinet/tcp_output.c
bsd/netinet/tcp_subr.c
bsd/netinet6/ip6_input.c
bsd/netkey/key.c
bsd/sys/cprotect.h
bsd/sys/event.h
bsd/vfs/vfs_fsevents.c
bsd/vm/vnode_pager.c
config/IOKit.exports
config/IOKit.i386.exports
config/IOKit.x86_64.exports
config/MasterVersion
config/Private.exports
config/System6.0.exports
config/Unsupported.exports
iokit/IOKit/IOBufferMemoryDescriptor.h
iokit/IOKit/IOHibernatePrivate.h
iokit/IOKit/IOMapper.h
iokit/IOKit/IOMemoryDescriptor.h
iokit/IOKit/pwr_mgt/IOPM.h
iokit/IOKit/pwr_mgt/IOPMPrivate.h
iokit/IOKit/pwr_mgt/RootDomain.h
iokit/Kernel/IOBufferMemoryDescriptor.cpp
iokit/Kernel/IODMACommand.cpp
iokit/Kernel/IOHibernateIO.cpp
iokit/Kernel/IOInterruptEventSource.cpp
iokit/Kernel/IOKitKernelInternal.h
iokit/Kernel/IOMapper.cpp
iokit/Kernel/IOMemoryDescriptor.cpp
iokit/Kernel/IOPMrootDomain.cpp
iokit/Kernel/IOPlatformExpert.cpp
iokit/Kernel/IOService.cpp
libkern/c++/OSMetaClass.cpp
libkern/conf/files
osfmk/i386/acpi.c
osfmk/ipc/ipc_object.c
osfmk/ipc/ipc_port.c
osfmk/ipc/ipc_pset.c
osfmk/ipc/mach_debug.c
osfmk/ipc/mach_port.c
osfmk/kern/hibernate.c
osfmk/pmc/pmc.h
osfmk/vm/vm_fault.c
osfmk/vm/vm_map.c
osfmk/vm/vm_object.c

diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c
index 26bafe77087d817a928756edafd933fbff19894a..d0835918039f1fea3e9b06f992698689d52b5bf3 100644
@@ -5113,12 +5113,14 @@ struct hfs_reclaim_extent_info {
  *                                        FABN = old FABN - E.blockCount
  *
  * Inputs: 
- *     extent_info - This is the structure that contains state about 
- *                   the current file, extent, and extent record that 
- *                   is being relocated.  This structure is shared 
- *                   among code that traverses through all the extents 
- *                   of the file, code that relocates extents, and 
- *                   code that splits the extent. 
+ *     extent_info -   This is the structure that contains state about 
+ *                     the current file, extent, and extent record that 
+ *                     is being relocated.  This structure is shared 
+ *                     among code that traverses through all the extents 
+ *                     of the file, code that relocates extents, and 
+ *                     code that splits the extent. 
+ *     newBlockCount - The blockCount that the extent being split 
+ *                     should have after a successful split operation.
  * Output:
  *     Zero on success, non-zero on failure.
  */
@@ -5148,6 +5150,13 @@ hfs_split_extent(struct hfs_reclaim_extent_info *extent_info, uint32_t newBlockC
        extents = extent_info->extents;
        cp = VTOC(extent_info->vp);
 
+       if (newBlockCount == 0) {
+               if (hfs_resize_debug) {
+                       printf ("hfs_split_extent: No splitting required for newBlockCount=0\n");
+               }
+               return error;
+       }
+
        if (hfs_resize_debug) {
                printf ("hfs_split_extent: Split record:%u recStartBlock=%u %u:(%u,%u) for %u blocks\n", extent_info->overflow_count, extent_info->recStartBlock, index, extents[index].startBlock, extents[index].blockCount, newBlockCount);
        }
@@ -5468,7 +5477,7 @@ hfs_split_extent(struct hfs_reclaim_extent_info *extent_info, uint32_t newBlockC
                                goto out;
                        }
                        if (hfs_resize_debug) {
-                               printf ("hfs_split_extent: Deleted record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
+                               printf ("hfs_split_extent: Deleted extent record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
                        }
                }
 
@@ -5488,8 +5497,18 @@ hfs_split_extent(struct hfs_reclaim_extent_info *extent_info, uint32_t newBlockC
                        printf ("hfs_split_extent: Inserted extent record with startBlock=%u\n", write_recStartBlock);
                }
        }
-       BTFlushPath(extent_info->fcb);
+
 out:
+       /* 
+        * Extents overflow btree or attributes btree headers might have 
+        * been modified during the split/shift operation, so flush the 
+        * changes to the disk while we are inside the journal transaction.  
+        * We should only be able to generate I/O that modifies the B-Tree 
+        * header nodes while we're in the middle of a journal transaction.  
+        * Otherwise it might result in a panic during unmount.
+        */
+       BTFlushPath(extent_info->fcb);
+
        if (extents_rec) {
                FREE (extents_rec, M_TEMP);
        }
@@ -5578,7 +5597,12 @@ hfs_reclaim_extent(struct hfsmount *hfsmp, const u_long allocLimit, struct hfs_r
         */
        if (oldStartBlock < allocLimit) {
                newBlockCount = allocLimit - oldStartBlock;
-               
+
+               if (hfs_resize_debug) {
+                       int idx = extent_info->extent_index;
+                       printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+               }
+
                /* If the extent belongs to a btree, check and trim 
                 * it to be multiple of the node size. 
                 */
@@ -5594,15 +5618,21 @@ hfs_reclaim_extent(struct hfsmount *hfsmp, const u_long allocLimit, struct hfs_r
                                if (remainder_blocks) {
                                        newBlockCount -= remainder_blocks;
                                        if (hfs_resize_debug) {
-                                               printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
+                                               printf ("hfs_reclaim_extent: Round-down newBlockCount to be multiple of nodeSize, node_allocblks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
                                        }
                                }
                        }
-               }
-
-               if (hfs_resize_debug) {
-                       int idx = extent_info->extent_index;
-                       printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+                       /* newBlockCount may now be zero because of the round-down
+                        * (btree nodes must not be split across extents).  In that
+                        * case this extent straddling the resize boundary does not
+                        * require splitting.  Skip ahead and relocate the complete
+                        * extent.
+                        */
+                       if (newBlockCount == 0) {
+                               if (hfs_resize_debug) {
+                                       printf ("hfs_reclaim_extent: After round-down newBlockCount=0, skip split, relocate full extent\n");
+                               }
+                               goto relocate_full_extent;
+                       }
                }
 
                /* Split the extents into two parts --- the first extent lies
@@ -5618,10 +5648,12 @@ hfs_reclaim_extent(struct hfsmount *hfsmp, const u_long allocLimit, struct hfs_r
                }
                /* Split failed, so try to relocate entire extent */
                if (hfs_resize_debug) {
-                       printf ("hfs_reclaim_extent: Split straddling extent failed, reclocate full extent\n");
+                       int idx = extent_info->extent_index;
+                       printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks failed, relocate full extent\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
                }
        }
 
+relocate_full_extent:
        /* At this point, the current extent requires relocation.  
         * We will try to allocate space equal to the size of the extent 
         * being relocated first to try to relocate it without splitting.  
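
The round-down in the hfs_reclaim_extent hunk above is easiest to follow with concrete numbers. Below is a minimal standalone sketch of the same arithmetic, with hypothetical sizes (the kernel reads node_size from the B-tree control block and blockSize from the hfsmount):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t blockSize = 4096;                    /* allocation block size (hypothetical) */
	uint32_t node_size = 8192;                    /* btree node size (hypothetical) */
	uint32_t node_blks = node_size / blockSize;   /* = 2 allocation blocks per node */
	uint32_t newBlockCount = 5;                   /* blocks that lie below allocLimit */

	uint32_t remainder_blocks = newBlockCount % node_blks;   /* = 1 */
	if (remainder_blocks)
		newBlockCount -= remainder_blocks;        /* 5 -> 4: nodes stay whole */

	if (newBlockCount == 0) {
		/* Extent smaller than one node: skip the split entirely and
		 * fall through to relocate_full_extent, as the hunk above does. */
		printf("skip split, relocate full extent\n");
	} else {
		printf("split at %u blocks\n", newBlockCount);
	}
	return 0;
}
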
diff --git a/bsd/hfs/hfs_vnops.c b/bsd/hfs/hfs_vnops.c
index e48966c3cc28640e3dc375a8844e719a54fc3718..f7725b383399a677ef86397054677cea702de625 100644
@@ -4167,6 +4167,41 @@ skip_rm:
 
        tdcp->c_flag |= C_FORCEUPDATE;  // XXXdbg - force it out!
        (void) hfs_update(tdvp, 0);
+
+       /* Update the vnode's name now that the rename has completed. */
+       vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, 
+                       tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
+
+       /* 
+        * At this point, we may have a resource fork vnode attached to the 
+        * 'from' vnode.  If it exists, we will want to update its name, because
+        * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
+        *
+        * Note that the only thing we need to update here is the name attached to
+        * the vnode, since a resource fork vnode does not have a separate resource
+        * cnode -- it's still 'fcp'.
+        */
+       if (fcp->c_rsrc_vp) {
+               char* rsrc_path = NULL;
+               int len;
+
+               /* Create a new temporary buffer that's going to hold the new name */
+               MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
+               len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
+               len = MIN(len, MAXPATHLEN);
+
+               /* 
+                * vnode_update_identity will do the following for us:
+                * 1) release reference on the existing rsrc vnode's name.
+                * 2) copy/insert new name into the name cache
+                * 3) attach the new name to the resource vnode
+                * 4) update the vnode's vid
+                */
+               vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
+
+               /* Free the memory associated with the resource fork's name */
+               FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);     
+       }
 out:
        if (got_cookie) {
                cat_postflight(hfsmp, &cookie, p);
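
For context, the name rebuilt for the resource fork vnode above is exactly the path userspace uses to address a resource fork: name + _PATH_RSRCFORKSPEC ("/..namedfork/rsrc"). A small userspace sketch, with a hypothetical file path, showing the same construction:

#include <fcntl.h>
#include <stdio.h>
#include <sys/paths.h>   /* defines _PATH_RSRCFORKSPEC ("/..namedfork/rsrc") */
#include <unistd.h>

int
main(void)
{
	char rsrc_path[1024];

	/* Hypothetical file; the fork path is name + _PATH_RSRCFORKSPEC,
	 * the same string the rename hunk rebuilds for fcp->c_rsrc_vp. */
	snprintf(rsrc_path, sizeof(rsrc_path), "%s%s",
	    "/tmp/example.txt", _PATH_RSRCFORKSPEC);

	int fd = open(rsrc_path, O_RDONLY);
	if (fd >= 0) {
		printf("opened resource fork: %s\n", rsrc_path);
		close(fd);
	} else {
		perror("open");   /* fails if the file or its fork does not exist */
	}
	return 0;
}
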
diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c
index ee3d66b09df04f6113ca0ba45aaf7e0dc9a1a7c1..ba269074ac91fb5abf3b93b41e22bfc83f711fb7 100644
@@ -471,7 +471,6 @@ static int
 filt_procattach(struct knote *kn)
 {
        struct proc *p;
-       pid_t selfpid = (pid_t)0;
 
        assert(PID_MAX < NOTE_PDATAMASK);
        
@@ -483,15 +482,22 @@ filt_procattach(struct knote *kn)
                return (ESRCH);
        }
 
-       if ((kn->kn_sfflags & NOTE_EXIT) != 0) {
-               selfpid = proc_selfpid();
-               /* check for validity of NOTE_EXISTATUS */
-               if (((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) && 
-                       ((p->p_ppid != selfpid) && (((p->p_lflag & P_LTRACED) == 0) || (p->p_oppid != selfpid)))) {
+       const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
+
+       if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
+               do {
+                       pid_t selfpid = proc_selfpid();
+
+                       if (p->p_ppid == selfpid)
+                               break;  /* parent => ok */
+
+                       if ((p->p_lflag & P_LTRACED) != 0 &&
+                           (p->p_oppid == selfpid))
+                               break;  /* parent-in-waiting => ok */
+
                        proc_rele(p);
-                       return(EACCES);
-               }
-       }
+                       return (EACCES);
+               } while (0);
 
        proc_klist_lock();
 
diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c
index 4e9f418be25464c7b0ab1827328b99122b04c4b0..e585c0d279deb5ce7fbc1a9ff2ffd6199c6fad54 100644
@@ -439,6 +439,7 @@ proc_exit(proc_t p)
        struct uthread * uth;
        pid_t pid;
        int exitval;
+       int knote_hint;
 
        uth = (struct uthread *)get_bsdthread_info(current_thread());
 
@@ -792,8 +793,12 @@ proc_exit(proc_t p)
        p->task = TASK_NULL;
        set_bsdtask_info(task, NULL);
 
-       /* exit status will be seen  by parent process */
-       proc_knote(p, NOTE_EXIT | (p->p_xstat & 0xffff));
+       knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
+       if (p->p_oppid != 0) {
+               knote_hint |= NOTE_EXIT_REPARENTED;
+       }
+
+       proc_knote(p, knote_hint);
 
        /* mark the thread as the one that is doing proc_exit
         * no need to hold proc lock in uthread_free
diff --git a/bsd/kern/kern_lockf.c b/bsd/kern/kern_lockf.c
index 13e4c97dbe02c6d8c58a58f3a8ac4f9a891a21c2..963493a11d10d5cc6c1fd371f37569a2218eccac 100644
@@ -579,35 +579,44 @@ lf_setlock(struct lockf *lock)
 #endif /* LOCKF_DEBUGGING */
                error = msleep(lock, &vp->v_lock, priority, lockstr, 0);
 
-               if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
-                       if ((block = lf_getblock(lock, -1))) {
-                               lf_move_blocked(block, lock);
-                       }
-               }
-
                if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
                        error = EBADF;
 
-               if (error) {    /* XXX */
+               if (lock->lf_next) {
                        /*
-                        * We may have been awakened by a signal and/or by a
-                        * debugger continuing us (in which cases we must remove
-                        * ourselves from the blocked list) and/or by another
-                        * process releasing a lock (in which case we have
-                        * already been removed from the blocked list and our
-                        * lf_next field set to NOLOCKF).
+                        * lf_wakelock() always sets wakelock->lf_next to
+                        * NULL before a wakeup; so we've been woken early
+                        * - perhaps by a debugger, signal or other event.
+                        *
+                        * Remove 'lock' from the block list (avoids double-add
+                        * in the spurious case, which would create a cycle)
                         */
-                       if (lock->lf_next) {
-                               TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
-                               lock->lf_next = NOLOCKF;
+                       TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
+                       lock->lf_next = NULL;
+
+                       if (error == 0) {
+                               /*
+                                * If this was a spurious wakeup, retry
+                                */
+                               printf("%s: spurious wakeup, retrying lock\n",
+                                   __func__);
+                               continue;
                        }
+               }
+
+               if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
+                       if ((block = lf_getblock(lock, -1)) != NULL)
+                               lf_move_blocked(block, lock);
+               }
+
+               if (error) {
                        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                                lf_wakelock(lock, TRUE);
-                         
                        FREE(lock, M_LOCKF);
                        return (error);
-               }       /* XXX */
+               }
        }
+
        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
@@ -1189,6 +1198,10 @@ lf_wakelock(struct lockf *listhead, boolean_t force_all)
                                struct lockf *tlock;
 
                                TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
+                                       if (TAILQ_NEXT(tlock, lf_block) == tlock) {
+                                               /* See rdar://10887303 */
+                                               panic("cycle in wakelock list");
+                                       }
                                        tlock->lf_next = wakelock;
                                }
                        }
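
The restructured lf_setlock loop treats lock->lf_next as the wakeup predicate: lf_wakelock() clears it before issuing the wakeup, so a non-NULL lf_next after msleep() means the thread woke early and must retry. The same predicate-loop discipline in a generic userspace form (pthreads; names are hypothetical):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER;
static bool granted = false;          /* analogue of lf_next being cleared */

/* Re-check the predicate after every wakeup, exactly as lf_setlock now
 * retries when lock->lf_next is still set after msleep(). */
void
wait_for_grant(void)
{
	pthread_mutex_lock(&mtx);
	while (!granted)                  /* spurious wakeup => loop again */
		pthread_cond_wait(&cv, &mtx);
	pthread_mutex_unlock(&mtx);
}

void
grant(void)
{
	pthread_mutex_lock(&mtx);
	granted = true;                   /* set before wake, like lf_wakelock */
	pthread_mutex_unlock(&mtx);
	pthread_cond_signal(&cv);
}
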
diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c
index 0112f0c024352e56582dc085f35de3742177e8e3..3edca9510c2df80355dcad9aad27ff6e922cf782 100644
@@ -2708,8 +2708,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
 
        for (i = 0; i < numpages; i++, page += NBPG) {
                ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
-               ppnum_t new_page = pmap_find_phys(kernel_pmap,
-                   (vm_offset_t)page);
+               ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
 
                /*
                 * In the case of no mapper being available the following
@@ -2717,7 +2716,10 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
                 * mapper the appropriate I/O page is returned.
                 */
                VERIFY(offset < mcl_pages);
-               new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+               if (mcl_paddr_base) {
+                   bzero((void *)(uintptr_t) page, page_size);
+                   new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+               }
                mcl_paddr[offset] = new_page << PGSHIFT;
 
                /* Pattern-fill this fresh page */
diff --git a/bsd/netinet/tcp_lro.c b/bsd/netinet/tcp_lro.c
index 55ebb0e38c06768c068b1f679dce0ad087d86dcf..0f127c7e3f2cd256f3dc92605b486f503a882322 100644
@@ -513,7 +513,7 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, struct ip *ip_hdr,
 
        case TCP_LRO_COALESCE:
                if ((payload_len != 0) && (unknown_tcpopts == 0) && 
-                       (tcpflags == 0) && (ecn == 0) && (to.to_flags & TOF_TS)) { 
+                       (tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) { 
                        tcp_lro_coalesce(flow_id, lro_mb, tcp_hdr, payload_len,
                                drop_hdrlen, &to, 
                                (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL,
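
The LRO change narrows the ECN test: coalescing is now refused only when the codepoint is CE (congestion experienced), rather than whenever any ECN bits are set, so ECT-marked segments remain eligible. A small sketch of reading the two-bit codepoint from the IP TOS byte with the standard BSD macros:

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>   /* IPTOS_ECN_MASK, IPTOS_ECN_ECT0, IPTOS_ECN_CE */
#include <stdio.h>

int
main(void)
{
	unsigned char tos = IPTOS_ECN_ECT0;          /* example: ECT(0)-marked packet */
	unsigned char ecn = tos & IPTOS_ECN_MASK;    /* low two bits of TOS */

	/* Old check (ecn == 0) also rejected ECT-marked traffic;
	 * the new check only refuses CE-marked segments. */
	if (ecn != IPTOS_ECN_CE)
		printf("eligible for LRO coalescing (ecn=0x%x)\n", ecn);
	else
		printf("congestion experienced, do not coalesce\n");
	return 0;
}
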
diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c
index 8a9eeb9cf07bf4a5ca08dc405f66ce1dcde8a745..83a205319e79b9cc92ef47eafecf7e7e2b0e6b3e 100644
@@ -299,7 +299,7 @@ tcp_output(struct tcpcb *tp)
        u_int16_t       socket_id = get_socket_id(so);
        int so_options = so->so_options;
        struct rtentry *rt;
-       u_int32_t basertt, svc_flags = 0;
+       u_int32_t basertt, svc_flags = 0, allocated_len;
        u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0;
        struct mbuf *mnext = NULL;
        int sackoptlen = 0;
@@ -825,7 +825,10 @@ after_sack_rexmit:
                        goto send;
                }
                if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
-                       tp->t_flags &= ~TF_MAXSEGSNT;
+                       if (len >= tp->t_maxseg)
+                               tp->t_flags |= TF_MAXSEGSNT;
+                       else
+                               tp->t_flags &= ~TF_MAXSEGSNT;
                        goto send;
                }
                if (sack_rxmit)
@@ -1275,15 +1278,8 @@ send:
                }
        }
 
-/*#ifdef DIAGNOSTIC*/
-#if INET6
        if (max_linkhdr + hdrlen > MCLBYTES)
                panic("tcphdr too big");
-#else
-       if (max_linkhdr + hdrlen > MHLEN)
-               panic("tcphdr too big");
-#endif
-/*#endif*/
 
        /* Check if there is enough data in the send socket
         * buffer to start measuring bw 
@@ -1314,7 +1310,8 @@ send:
                        tcpstat.tcps_sndrexmitpack++;
                        tcpstat.tcps_sndrexmitbyte += len;
                        if (nstat_collect) {
-                               nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1, len, NSTAT_TX_FLAG_RETRANSMIT);
+                               nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1, 
+                                       len, NSTAT_TX_FLAG_RETRANSMIT);
                                locked_add_64(&tp->t_inpcb->inp_stat->txpackets, 1);
                                locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
                                tp->t_stat.txretransmitbytes += len;
@@ -1327,18 +1324,6 @@ send:
                                locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
                        }
                }
-#ifdef notyet
-               if ((m = m_copypack(so->so_snd.sb_mb, off,
-                   (int)len, max_linkhdr + hdrlen)) == 0) {
-                       error = ENOBUFS;
-                       goto out;
-               }
-               /*
-                * m_copypack left space for our hdr; use it.
-                */
-               m->m_len += hdrlen;
-               m->m_data -= hdrlen;
-#else
                /*
                 * try to use the new interface that allocates all 
                 * the necessary mbuf hdrs under 1 mbuf lock and 
@@ -1352,11 +1337,13 @@ send:
                 * data area (no cluster attached)
                 */
                m = NULL;
-#if INET6
+
+               /* minimum length we are going to allocate */
+               allocated_len = MHLEN;
                if (MHLEN < hdrlen + max_linkhdr) {
-                       MGETHDR(m, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
+                       MGETHDR(m, M_DONTWAIT, MT_HEADER);
                        if (m == NULL) {
-                               error = ENOBUFS;
+                               error = ENOBUFS;
                                goto out;
                        }
                        MCLGET(m, M_DONTWAIT);
@@ -1367,13 +1354,14 @@ send:
                        }
                        m->m_data += max_linkhdr;
                        m->m_len = hdrlen;
+                       allocated_len = MCLBYTES;
                }
-#endif
-               if (len <= MHLEN - hdrlen - max_linkhdr) {
+               if (len <= allocated_len - hdrlen - max_linkhdr) {
                        if (m == NULL) {
-                               MGETHDR(m, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
+                               VERIFY(allocated_len <= MHLEN);
+                               MGETHDR(m, M_DONTWAIT, MT_HEADER);
                                if (m == NULL) {
-                                       error = ENOBUFS;
+                                       error = ENOBUFS;
                                        goto out;
                                }
                                m->m_data += max_linkhdr;
@@ -1430,7 +1418,6 @@ send:
                                m->m_len = hdrlen;
                        }
                }
-#endif
                /*
                 * If we're sending everything we've got, set PUSH.
                 * (This will keep happy those implementations which only
@@ -1454,13 +1441,15 @@ send:
                        error = ENOBUFS;
                        goto out;
                }
-#if INET6
-               if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
-                   MHLEN >= hdrlen) {
-                       MH_ALIGN(m, hdrlen);
-               } else
-#endif
-                       m->m_data += max_linkhdr;
+               if (MHLEN < (hdrlen + max_linkhdr)) {
+                       MCLGET(m, M_DONTWAIT);
+                       if ((m->m_flags & M_EXT) == 0) {
+                               m_freem(m);
+                               error = ENOBUFS;
+                               goto out;
+                       }
+               }
+               m->m_data += max_linkhdr;
                m->m_len = hdrlen;
        }
        m->m_pkthdr.rcvif = 0;
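
Taken together, the tcp_output changes reduce header allocation to one rule: if max_linkhdr + hdrlen does not fit in an mbuf's own data area (MHLEN), attach a cluster so up to MCLBYTES is available. A kernel-context sketch of just that rule (hypothetical helper name; not runnable outside the kernel):

#include <sys/mbuf.h>

/* Allocate a packet-header mbuf with room for link + TCP/IP headers,
 * growing to a cluster when MHLEN is too small -- the pattern the
 * hunks above now use on both header paths. */
static struct mbuf *
alloc_hdr_mbuf(int hdrlen, int linkhdrlen)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return NULL;                  /* caller reports ENOBUFS */

	if (MHLEN < hdrlen + linkhdrlen) {
		MCLGET(m, M_DONTWAIT);        /* grow data area to MCLBYTES */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	m->m_data += linkhdrlen;          /* leave room for the link header */
	m->m_len = hdrlen;
	return m;
}
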
diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c
index 355d4f0d5b8d3a7b35f75784dcd3e915717f39c2..531b37fa9e08ead34e37384e5add75eda290437d 100644
@@ -414,7 +414,7 @@ tcp_init()
                _max_protohdr = TCP_MINPROTOHDR;
                _max_protohdr = max_protohdr;   /* round it up */
        }
-       if (max_linkhdr + max_protohdr > MHLEN)
+       if (max_linkhdr + max_protohdr > MCLBYTES)
                panic("tcp_init");
 #undef TCP_MINPROTOHDR
 
diff --git a/bsd/netinet6/ip6_input.c b/bsd/netinet6/ip6_input.c
index 9fcc992cdab38469d443f5221a55cfc59fc2c950..118aa245ad5fb005fdc50ce8888edf8f27e318ee 100644
@@ -307,6 +307,9 @@ ip6_init()
        int i;
        struct timeval tv;
 
+       _CASSERT((sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr)) <= 
+               _MHLEN);
+
        PE_parse_boot_argn("net.inet6.ip6.scopedroute", &ip6_doscopedroute,
            sizeof (ip6_doscopedroute));
 
diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c
index d1d59cd6fb56a6d00cd3e0b00b62fae42cb50969..ffbdcf88ea40764cc15a264aeaa87cf0ebf31ad9 100644
@@ -583,7 +583,9 @@ key_init(void)
 {
 
        int i;
-       
+
+       _CASSERT(PFKEY_ALIGN8(sizeof(struct sadb_msg)) <= _MHLEN);
+
        sadb_mutex_grp_attr = lck_grp_attr_alloc_init();
        sadb_mutex_grp = lck_grp_alloc_init("sadb", sadb_mutex_grp_attr);
        sadb_mutex_attr = lck_attr_alloc_init();
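
Both the ip6_init and key_init hunks use _CASSERT, XNU's compile-time assertion, to prove at build time that a protocol header fits in an mbuf's data area (_MHLEN). A self-contained sketch of the underlying trick, with hypothetical names standing in for the kernel's macro and constants:

#include <stdio.h>

/* Stand-in for _CASSERT: a negative array size fails the build when the
 * condition is false, at zero runtime cost. */
#define MY_CASSERT(expr) typedef char my_cassert_check[(expr) ? 1 : -1]

struct msg_like { unsigned char bytes[16]; };   /* hypothetical header */
#define MY_MHLEN 224                            /* hypothetical mbuf data size */

MY_CASSERT(sizeof(struct msg_like) <= MY_MHLEN);

int
main(void)
{
	printf("compile-time size check passed\n");
	return 0;
}
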
diff --git a/bsd/sys/cprotect.h b/bsd/sys/cprotect.h
index 0dda075ac9e7ec31c1a60f7c4990558ca6aed8ea..eb0a134fdff7fca6d8ec54373325449543adf2eb 100644
@@ -65,6 +65,9 @@ extern "C" {
 #define CP_READ_ACCESS         0x1
 #define CP_WRITE_ACCESS 0x2
 
+/* 
+ * Check for this version when deciding to enable features
+ */
 #define CONTENT_PROTECTION_XATTR_NAME  "com.apple.system.cprotect"
 #define CP_NEW_MAJOR_VERS 4
 #define CP_PREV_MAJOR_VERS 2
diff --git a/bsd/sys/event.h b/bsd/sys/event.h
index d22d5efb2050c3e3afda7eeceb97dd88c066ee3f..47fb6dafe8fb2fe7d8b6829fd6a662287064fbd0 100644
@@ -261,6 +261,11 @@ struct kevent64_s {
 #define        NOTE_PDATAMASK  0x000fffff              /* mask for pid/signal */
 #define        NOTE_PCTRLMASK  (~NOTE_PDATAMASK)
 
+/*
+ * If NOTE_EXITSTATUS is present, provide additional info about the exiting process.
+ */
+#define NOTE_EXIT_REPARENTED   0x00080000      /* exited while reparented */
+
 /*
  * data/hint fflags for EVFILT_VM, shared with userspace.
  */
diff --git a/bsd/vfs/vfs_fsevents.c b/bsd/vfs/vfs_fsevents.c
index 179a264d4d9c8ac5868190ce14a90d447bddb295..52cdb65aff904a43b60a72d8f0fa1a26b0db5fa8 100644
@@ -112,6 +112,8 @@ typedef struct fs_event_watcher {
     uint32_t     num_dropped;
     uint64_t     max_event_id;
     struct fsevent_handle *fseh;
+    pid_t        pid;
+    char         proc_name[(2 * MAXCOMLEN) + 1];
 } fs_event_watcher;
 
 // fs_event_watcher flags
@@ -119,7 +121,7 @@ typedef struct fs_event_watcher {
 #define WATCHER_CLOSING                0x0002
 #define WATCHER_WANTS_COMPACT_EVENTS   0x0004
 #define WATCHER_WANTS_EXTENDED_INFO    0x0008
-
+#define WATCHER_APPLE_SYSTEM_SERVICE   0x0010   // fseventsd, coreservicesd, mds
 
 #define MAX_WATCHERS  8
 static fs_event_watcher *watcher_table[MAX_WATCHERS];
@@ -164,6 +166,47 @@ __private_extern__ void qsort(
     int (*)(const void *, const void *));
 
 
+
+/* From kdp_udp.c + user mode Libc - this ought to be in a library */
+static char *
+strnstr(char *s, const char *find, size_t slen)
+{
+  char c, sc;
+  size_t len;
+  
+  if ((c = *find++) != '\0') {
+    len = strlen(find);
+    do {
+      do {
+        if ((sc = *s++) == '\0' || slen-- < 1)
+          return (NULL);
+      } while (sc != c);
+      if (len > slen)
+        return (NULL);
+    } while (strncmp(s, find, len) != 0);
+    s--;
+  }
+  return (s);
+}
+
+static int
+is_ignored_directory(const char *path) {
+
+    if (!path) {
+      return 0;
+    }
+
+#define IS_TLD(x) strnstr((char *) path, x, MAXPATHLEN) 
+    if (IS_TLD("/.Spotlight-V100/") ||
+        IS_TLD("/.MobileBackups/") || 
+        IS_TLD("/Backups.backupdb/")) {
+        return 1;
+    }
+#undef IS_TLD
+    
+    return 0;
+}
+
 static void
 fsevents_internal_init(void)
 {
@@ -278,40 +321,6 @@ need_fsevent(int type, vnode_t vp)
     return 1;
 }
 
-static int
-prefix_match_len(const char *str1, const char *str2)
-{
-    int len=0;
-
-    while(*str1 && *str2 && *str1 == *str2) {
-       len++;
-       str1++;
-       str2++;
-    }
-
-    if (*str1 == '\0' && *str2 == '\0') {
-       len++;
-    }
-
-    return len;
-}
-
-
-struct history_item {
-    kfs_event *kfse;
-    kfs_event *oldest_kfse;
-    int        counter;
-};
-
-static int
-compare_history_items(const void *_a, const void *_b)
-{
-    const struct history_item *a = (const struct history_item *)_a;
-    const struct history_item *b = (const struct history_item *)_b;
-
-    // we want a descending order
-    return (b->counter - a->counter);
-}
 
 #define is_throw_away(x)  ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)
 
@@ -340,120 +349,9 @@ compare_history_items(const void *_a, const void *_b)
 #define KFSE_RECYCLED   0x0004
 
 int num_dropped         = 0;
-int num_combined_events = 0;
-int num_added_to_parent = 0;
 int num_parent_switch   = 0;
 int num_recycled_rename = 0;
 
-//
-// NOTE: you must call lock_fs_event_list() before calling
-//       this function.
-//
-static kfs_event *
-find_an_event(const char *str, int len, kfs_event *do_not_reuse, int *reuse_type, int *longest_match_len)
-{
-    kfs_event *kfse, *best_kfse=NULL;
-
-// this seems to be enough to find most duplicate events for the same vnode
-#define MAX_HISTORY  12 
-    struct history_item history[MAX_HISTORY];
-    int           i;
-
-    *longest_match_len = 0;
-    *reuse_type = 0;
-    
-    memset(history, 0, sizeof(history));
-
-    //
-    // now walk the list of events and try to find the best match
-    // for this event.  if we have a vnode, we look for an event
-    // that already references the vnode.  if we don't find one
-    // we'll also take the parent of this vnode (in which case it
-    // will be marked as having dropped events within it).
-    //
-    // if we have a string we look for the longest match on the
-    // path we have.
-    //
-
-    LIST_FOREACH(kfse, &kfse_list_head, kevent_list) {
-       int match_len;
-
-       //
-       // don't look at events that are still in the process of being
-       // created, have a null vnode ptr or rename/exchange events.
-       //
-       if (   (kfse->flags & KFSE_BEING_CREATED) || kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
-
-           continue;
-       }
-       
-       if (str != NULL) {
-           if (kfse->len != 0 && kfse->str != NULL) {
-               match_len = prefix_match_len(str, kfse->str);
-               if (match_len > *longest_match_len) {
-                   best_kfse = kfse;
-                   *longest_match_len = match_len;
-               }
-           }
-       }
-
-       if (kfse == do_not_reuse) {
-           continue;
-       }
-
-       for(i=0; i < MAX_HISTORY; i++) {
-           if (history[i].kfse == NULL) {
-               break;
-           }
-
-           //
-           // do a quick check to see if we've got two simple events
-           // that we can cheaply combine.  if the event we're looking
-           // at and one of the events in the history table are for the
-           // same path then we'll just mark the newer event as combined
-           // and recyle the older event.
-           //
-           if (history[i].kfse->str == kfse->str) {
-
-               OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &kfse->flags);
-               *reuse_type = KFSE_RECYCLED;
-               history[i].kfse->flags |= KFSE_RECYCLED_EVENT;
-               return history[i].kfse;
-           }
-       }
-
-       if (i < MAX_HISTORY && history[i].kfse == NULL) {
-           history[i].kfse = kfse;
-           history[i].counter = 1;
-       } else if (i >= MAX_HISTORY) {
-           qsort(history, MAX_HISTORY, sizeof(struct history_item), compare_history_items);
-
-           // pluck off the lowest guy if he's only got a count of 1
-           if (history[MAX_HISTORY-1].counter == 1) {
-               history[MAX_HISTORY-1].kfse = kfse;
-           }
-       }
-    }
-
-    
-    if (str != NULL && best_kfse) {
-       if (*longest_match_len <= 1) {
-           // if the best match we had was "/" then basically we're toast...
-           *longest_match_len = 0;
-           best_kfse = NULL;
-       } else if (*longest_match_len != len) {
-           OSBitOrAtomic16(KFSE_CONTAINS_DROPPED_EVENTS, &best_kfse->flags);
-           *reuse_type = KFSE_COLLAPSED;
-       } else {
-           OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &best_kfse->flags);
-           *reuse_type = KFSE_COMBINED;
-       }
-    }
-
-    return best_kfse;
-}
-
-
 static struct timeval last_print;
 
 //
@@ -480,14 +378,13 @@ int
 add_fsevent(int type, vfs_context_t ctx, ...) 
 {
     struct proc             *p = vfs_context_proc(ctx);
-    int               i, arg_type, skip_init=0, longest_match_len, ret;
+    int               i, arg_type, ret;
     kfs_event        *kfse, *kfse_dest=NULL, *cur;
     fs_event_watcher *watcher;
     va_list           ap;
     int              error = 0, did_alloc=0, need_event_unlock = 0;
     dev_t             dev = 0;
     uint64_t          now, elapsed;
-    int               reuse_type = 0;
     char             *pathbuff=NULL;
     int               pathbuff_len;
 
@@ -598,55 +495,6 @@ add_fsevent(int type, vfs_context_t ctx, ...)
 
 
     if (kfse == NULL) {        // yikes! no free events
-       int len=0;
-       char *str;
-       
-       //
-       // Figure out what kind of reference we have to the
-       // file in this event.  This helps us find an event
-       // to combine/collapse into to make room.
-       //
-       // If we have a rename or exchange event then we
-       // don't want to go through the normal path, we
-       // want to "steal" an event instead (which is what
-       // find_an_event() will do if str is null).
-       //
-       arg_type = va_arg(ap, int32_t);
-       if (type == FSE_RENAME || type == FSE_EXCHANGE) {
-           str = NULL;
-       } else if (arg_type == FSE_ARG_STRING) {
-           len = va_arg(ap, int32_t);
-           str = va_arg(ap, char *);
-       } else if (arg_type == FSE_ARG_VNODE) {
-           struct vnode *vp;
-
-           vp  = va_arg(ap, struct vnode *);
-           pathbuff = get_pathbuff();
-           pathbuff_len = MAXPATHLEN;
-           if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
-               release_pathbuff(pathbuff);
-               pathbuff = NULL;
-           }
-           str = pathbuff;
-       } else {
-           str = NULL;
-       }
-
-       //
-       // This will go through all events and find one that we
-        // can combine with (hopefully), or "collapse" into (i.e
-       // it has the same parent) or in the worst case we have
-       // to "recycle" an event which means that it will combine
-       // two other events and return us the now unused event.
-       // failing all that, find_an_event() could still return
-       // null and if it does then we have a catastrophic dropped
-       // events scenario.
-       //
-       kfse = find_an_event(str, len, NULL, &reuse_type, &longest_match_len);
-
-       if (kfse == NULL) {
-         bail_early:
-           
            unlock_fs_event_list();
            lock_watch_table();
 
@@ -681,10 +529,11 @@ add_fsevent(int type, vfs_context_t ctx, ...)
                            continue;
                        }
                        
-                       printf("add_fsevent: watcher %p: num dropped %d rd %4d wr %4d q_size %4d flags 0x%x\n",
-                           watcher_table[ii], watcher_table[ii]->num_dropped,
-                           watcher_table[ii]->rd, watcher_table[ii]->wr,
-                           watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
+                       printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
+                              watcher_table[ii]->proc_name,
+                              watcher_table[ii],
+                              watcher_table[ii]->rd, watcher_table[ii]->wr,
+                              watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
                    }
 
                    last_print = current_tv;
@@ -698,223 +547,13 @@ add_fsevent(int type, vfs_context_t ctx, ...)
                release_pathbuff(pathbuff);
                pathbuff = NULL;
            }
-
            return ENOSPC;
        }
 
-       if ((type == FSE_RENAME || type == FSE_EXCHANGE) && reuse_type != KFSE_RECYCLED) {
-           panic("add_fsevent: type == %d but reuse type == %d!\n", type, reuse_type);
-       } else if ((kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) && kfse->dest == NULL) {
-           panic("add_fsevent: bogus kfse %p (type %d, but dest is NULL)\n", kfse, kfse->type);
-       } else if (kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
-           panic("add_fsevent: we should never re-use rename events (kfse %p reuse type %d)!\n", kfse, reuse_type);
-       }
-
-       if (reuse_type == KFSE_COLLAPSED) {
-           if (str) {
-               const char *tmp_ptr, *new_str;
-               
-               //
-               // if we collapsed and have a string we have to chop off the
-               // tail component of the pathname to get the parent.
-               //
-               // NOTE: it is VERY IMPORTANT that we leave the trailing slash
-               //       on the pathname.  user-level code depends on this.
-               //
-               if (str[0] == '\0' || longest_match_len <= 1) {
-                   printf("add_fsevent: strange state (str %s / longest_match_len %d)\n", str, longest_match_len);
-                   if (longest_match_len < 0) {
-                       panic("add_fsevent: longest_match_len %d\n", longest_match_len);
-                   }
-               }
-               // chop off the tail component if it's not the
-               // first character...
-               if (longest_match_len > 1) {
-                   str[longest_match_len] = '\0';
-               } else if (longest_match_len == 0) {
-                   longest_match_len = 1;
-               }
-
-               new_str = vfs_addname(str, longest_match_len, 0, 0);
-               if (new_str == NULL || new_str[0] == '\0') {
-                   panic("add_fsevent: longest match is strange (new_str %p).\n", new_str);
-               }
-               
-               lck_rw_lock_exclusive(&event_handling_lock);
-
-               kfse->len      = longest_match_len;
-               tmp_ptr        = kfse->str;
-               kfse->str = new_str;
-               kfse->ino      = 0;
-               kfse->mode     = 0;
-               kfse->uid      = 0;
-               kfse->gid      = 0;
-               
-               lck_rw_unlock_exclusive(&event_handling_lock);
-               
-               vfs_removename(tmp_ptr);
-           } else {
-               panic("add_fsevent: don't have a vnode or a string pointer (kfse %p)\n", kfse);
-           }
-       }
-
-       if (reuse_type == KFSE_RECYCLED && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
-           
-           // if we're recycling this kfse and we have a rename or
-           // exchange event then we need to also get an event for
-           // kfse_dest. 
-           //
-           if (did_alloc) {
-               // only happens if we allocated one but then failed
-               // for kfse_dest (and thus free'd the first one we
-               // allocated)
-               kfse_dest = zalloc_noblock(event_zone);
-               if (kfse_dest != NULL) {
-                   memset(kfse_dest, 0, sizeof(kfs_event));
-                   kfse_dest->refcount = 1;
-                   OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
-               } else {
-                   did_alloc = 0;
-               }
-           }
-
-           if (kfse_dest == NULL) {
-               int dest_reuse_type, dest_match_len;
-               
-               kfse_dest = find_an_event(NULL, 0, kfse, &dest_reuse_type, &dest_match_len);
-               
-               if (kfse_dest == NULL) {
-                   // nothing we can do... gotta bail out
-                   goto bail_early;
-               }
-
-               if (dest_reuse_type != KFSE_RECYCLED) {
-                   panic("add_fsevent: type == %d but dest_reuse type == %d!\n", type, dest_reuse_type);
-               }
-           }
-       }
-
-
-       //
-       // Here we check for some fast-path cases so that we can
-       // jump over the normal initialization and just get on
-       // with delivering the event.  These cases are when we're
-       // combining/collapsing an event and so basically there is
-       // no more work to do (aside from a little book-keeping)
-       //
-       if (str && kfse->len != 0) {
-           kfse->abstime = now;
-           OSAddAtomic(1, &kfse->refcount);
-           skip_init = 1;
-
-           if (reuse_type == KFSE_COMBINED) {
-               num_combined_events++;
-           } else if (reuse_type == KFSE_COLLAPSED) {
-               num_added_to_parent++;
-           }
-       } else if (reuse_type != KFSE_RECYCLED) {
-           panic("add_fsevent: I'm so confused! (reuse_type %d str %p kfse->len %d)\n",
-                 reuse_type, str, kfse->len);
-       }
-
-       va_end(ap);
-
-
-       if (skip_init) {
-           if (kfse->refcount < 1) {
-               panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
-           }
-
-           last_event_ptr = kfse;
-           unlock_fs_event_list();
-           goto normal_delivery;
-           
-       } else if (reuse_type == KFSE_RECYCLED || reuse_type == KFSE_COMBINED) {
-
-           //
-           // If we're here we have to clear out the kfs_event(s)
-           // that we were given by find_an_event() and set it
-           // up to be re-filled in by the normal code path.
-           //
-           va_start(ap, ctx);
-
-           need_event_unlock = 1;
-           lck_rw_lock_exclusive(&event_handling_lock);
-
-           OSAddAtomic(1, &kfse->refcount);
-
-           if (kfse->refcount < 1) {
-               panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
-           }
-
-           if (kfse->len == 0) {
-               panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
-               // vnode_rele_ext(kfse->fref.vp, O_EVTONLY, 0);
-           } else {
-               vfs_removename(kfse->str);
-               kfse->len = 0;
-           }
-           kfse->str = NULL;
-
-           if (kfse->kevent_list.le_prev != NULL) {
-               num_events_outstanding--;
-               if (kfse->type == FSE_RENAME) {
-                   num_pending_rename--;
-               }
-               LIST_REMOVE(kfse, kevent_list);
-               memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
-           }
-
-           kfse->flags = 0 | KFSE_RECYCLED_EVENT;
-           
-           if (kfse_dest) {
-               OSAddAtomic(1, &kfse_dest->refcount);
-               kfse_dest->flags = 0 | KFSE_RECYCLED_EVENT;
-
-               if (did_alloc == 0) {
-                   if (kfse_dest->len == 0) {
-                       panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
-                       // vnode_rele_ext(kfse_dest->fref.vp, O_EVTONLY, 0);
-                   } else {
-                       vfs_removename(kfse_dest->str);
-                       kfse_dest->len = 0;
-                   }
-                   kfse_dest->str = NULL;
-
-                   if (kfse_dest->kevent_list.le_prev != NULL) {
-                       num_events_outstanding--;
-                       LIST_REMOVE(kfse_dest, kevent_list);
-                       memset(&kfse_dest->kevent_list, 0, sizeof(kfse_dest->kevent_list));
-                   }
-
-                   if (kfse_dest->dest) {
-                       panic("add_fsevent: should never recycle a rename event! kfse %p\n", kfse);
-                   }
-               }
-           }
-
-           OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
-           if (kfse_dest) {
-               OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
-           }
-
-           goto process_normally;
-       }
-    }
-
-    if (reuse_type != 0) {
-       panic("fsevents: we have a reuse_type (%d) but are about to clear out kfse %p\n", reuse_type, kfse);
-    }
-
-    //
-    // we only want to do this for brand new events, not
-    // events which have been recycled.
-    //
     memset(kfse, 0, sizeof(kfs_event));
     kfse->refcount = 1;
     OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
 
-  process_normally:
     last_event_ptr = kfse;
     kfse->type     = type;
     kfse->abstime  = now;
@@ -1103,7 +742,6 @@ add_fsevent(int type, vfs_context_t ctx, ...)
        lck_rw_unlock_shared(&event_handling_lock);
     }
     
-  normal_delivery:
     // unlock this here so we don't hold it across the
     // event delivery loop.
     if (need_event_unlock) {
@@ -1270,7 +908,6 @@ release_event_ref(kfs_event *kfse)
     }
 }
 
-
 static int
 add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
 {
@@ -1304,9 +941,17 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even
     watcher->num_readers  = 0;
     watcher->max_event_id = 0;
     watcher->fseh         = fseh;
+    watcher->pid          = proc_selfpid();
+    proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));
 
     watcher->num_dropped  = 0;      // XXXdbg - debugging
 
+    if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
+       !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
+       !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
+       watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
+    }
+
     lock_watch_table();
 
     // now update the global list of who's interested in
@@ -1375,7 +1020,9 @@ remove_watcher(fs_event_watcher *target)
        unlock_watch_table();
            
        while (watcher->num_readers > 1 && counter++ < 5000) {
+           lock_watch_table();
            fsevents_wakeup(watcher);      // in case they're asleep
+           unlock_watch_table();
            
            tsleep(watcher, PRIBIO, "fsevents-close", 1);
        }
@@ -1385,22 +1032,23 @@ remove_watcher(fs_event_watcher *target)
        }
 
        // drain the event_queue 
-       while(watcher->rd != watcher->wr) {
-           lck_rw_lock_shared(&event_handling_lock);
 
+       while(watcher->rd != watcher->wr) {
+           lck_rw_lock_exclusive(&event_handling_lock);
            kfse = watcher->event_queue[watcher->rd];
-           if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-               panic("remove_watcher: bogus kfse %p during cleanup (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+           if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+               lck_rw_unlock_exclusive(&event_handling_lock);
+               break;
            }
-
-           lck_rw_unlock_shared(&event_handling_lock);
-           
+           watcher->event_queue[watcher->rd] = NULL;
            watcher->rd = (watcher->rd+1) % watcher->eventq_size;
-
+           OSSynchronizeIO();
            if (kfse != NULL) {
                release_event_ref(kfse);
            }
+           lck_rw_unlock_exclusive(&event_handling_lock);
        }
+
            
        if (watcher->event_list) {
            FREE(watcher->event_list, M_TEMP);
@@ -1493,17 +1141,47 @@ watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
     // send any pending events if no more are received in the next 
     // EVENT_DELAY_IN_MS milli-seconds.
     //
-    if (   (watcher->rd < watcher->wr && (watcher->wr - watcher->rd) > MAX_NUM_PENDING)
-       || (watcher->rd > watcher->wr && (watcher->wr + watcher->eventq_size - watcher->rd) > MAX_NUM_PENDING)) {
+       int32_t num_pending = 0;
+       if (watcher->rd < watcher->wr) {
+               num_pending = watcher->wr - watcher->rd;
+       }
 
-       fsevents_wakeup(watcher);
+       if (watcher->rd > watcher->wr) {
+               num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
+       }
 
-    } else if (timer_set == 0) {
+       if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
+           /* Non-Apple Service is falling behind, start dropping events for this process */
 
-       schedule_event_wakeup();
-    }
-
-    return 0;
+           lck_rw_lock_exclusive(&event_handling_lock);            
+           while (watcher->rd != watcher->wr) {
+             kfse = watcher->event_queue[watcher->rd];
+             if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+                 lck_rw_unlock_exclusive(&event_handling_lock);
+                 break;
+             }
+             watcher->event_queue[watcher->rd] = NULL;         
+             watcher->rd = (watcher->rd+1) % watcher->eventq_size;
+             OSSynchronizeIO();
+             if (kfse != NULL) {
+               release_event_ref(kfse);
+             }
+           }
+           lck_rw_unlock_exclusive(&event_handling_lock);
+           
+           printf("fsevents: watcher failing behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
+                  watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
+                  watcher->eventq_size, watcher->flags);
+           
+           watcher->flags |= WATCHER_DROPPED_EVENTS;
+           fsevents_wakeup(watcher);
+       } else if (num_pending > MAX_NUM_PENDING) {
+           fsevents_wakeup(watcher);
+       } else if (timer_set == 0) {
+           schedule_event_wakeup();
+       }
+       
+       return 0;
 }
 
 static int
@@ -1740,6 +1418,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
     user_ssize_t      last_full_event_resid;
     kfs_event        *kfse;
     uint16_t          tmp16;
+    int               skipped;
 
     last_full_event_resid = uio_resid(uio);
 
@@ -1758,6 +1437,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
        return EAGAIN;
     }
 
+ restart_watch:
     if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, &watcher->num_readers);
@@ -1799,6 +1479,7 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
     }
 
+    skipped = 0;
     while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            break;
@@ -1812,12 +1493,21 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
        lck_rw_lock_shared(&event_handling_lock);
 
        kfse = watcher->event_queue[watcher->rd];
-       if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-           panic("fmod_watch: someone left me a bogus kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+       if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+         lck_rw_unlock_shared(&event_handling_lock);
+         break;
        }
 
        if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
 
+         if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && is_ignored_directory(kfse->str)) {
+           // If this is not an Apple System Service, skip specified directories
+           // radar://12034844
+           error = 0;
+           skipped = 1;
+         } else {
+
+           skipped = 0;
            if (last_event_ptr == kfse) {
                last_event_ptr = NULL;
                last_event_type = -1;
@@ -1839,18 +1529,18 @@ fmod_watch(fs_event_watcher *watcher, struct uio *uio)
            }
 
            last_full_event_resid = uio_resid(uio);
+         }
        }
 
-       lck_rw_unlock_shared(&event_handling_lock);
-
        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
        OSSynchronizeIO();
-           
-       if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
-           panic("fmod_watch:2: my kfse became bogus! kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
-       }
-
        release_event_ref(kfse);
+
+       lck_rw_unlock_shared(&event_handling_lock);
+    }
+
+    if (skipped && error == 0) {
+      goto restart_watch;
     }
 
   get_out:
@@ -2224,7 +1914,9 @@ fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
         // and decision to tsleep in fmod_watch... this bit of 
         // latency is a decent tradeoff against not having to
         // take and drop a lock in fmod_watch
+       lock_watch_table();
        fsevents_wakeup(fseh->watcher);
+       unlock_watch_table();
 
        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
     }
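
A thread running through the fsevents rewrite: watcher_add_event now computes the queue depth explicitly before deciding whether to drop (non-Apple watcher past 3/4 full), wake (past MAX_NUM_PENDING), or arm the delayed wakeup. The wrap-around arithmetic in isolation, as a minimal sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

/* Pending events in a circular queue of q_size slots with read index rd
 * and write index wr -- the computation watcher_add_event performs. */
static int32_t
num_pending(int32_t rd, int32_t wr, int32_t q_size)
{
	if (rd < wr)
		return wr - rd;
	if (rd > wr)
		return wr + q_size - rd;   /* writer wrapped around */
	return 0;                          /* rd == wr: empty */
}

int
main(void)
{
	int32_t q_size = 4096;             /* hypothetical eventq_size */

	printf("%d\n", num_pending(10, 50, q_size));     /* 40 */
	printf("%d\n", num_pending(4000, 20, q_size));   /* 116 (wrapped) */
	/* Drop threshold for non-Apple watchers in the hunk above: */
	printf("threshold = %d\n", q_size * 3 / 4);      /* 3072 */
	return 0;
}
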
diff --git a/bsd/vm/vnode_pager.c b/bsd/vm/vnode_pager.c
index f86ba0148f42e2d61b22a07672f8adf17919d25f..93441bcb8bf1804fd4cdb3f3b7f88d98f8269b1d 100644
@@ -502,7 +502,6 @@ vnode_pagein(
        int                     flags,
        int                     *errorp)
 {
-        struct uthread *ut;
         upl_page_info_t *pl;
        int             result = PAGER_SUCCESS;
        int             error = 0;
@@ -690,18 +689,6 @@ out:
        if (errorp)
                *errorp = result;
 
-       ut = get_bsdthread_info(current_thread());
-
-       if (ut->uu_lowpri_window) {
-               /*
-                * task is marked as a low priority I/O type
-                * and the I/O we issued while in this page fault
-                * collided with normal I/O operations... we'll
-                * delay in order to mitigate the impact of this
-                * task on the normal operation of the system
-                */
-               throttle_lowpri_io(TRUE);
-       }
        return (error);
 }
 
diff --git a/config/IOKit.exports b/config/IOKit.exports
index e78f56919ff60eb000509403e8ac90ab02d046c5..0cb629e45584986036615be368919fc9de681fa6 100644
@@ -1009,6 +1009,7 @@ __ZN8IOMapper10superClassE
 __ZN8IOMapper17setMapperRequiredEb
 __ZN8IOMapper19copyMapperForDeviceEP9IOService
 __ZN8IOMapper19waitForSystemMapperEv
+__ZN8IOMapper13iovmMapMemoryEP8OSObjectjjjP13upl_page_infoPK21IODMAMapSpecification
 __ZN8IOMapper4freeEv
 __ZN8IOMapper5startEP9IOService
 __ZN8IOMapper7gSystemE
diff --git a/config/IOKit.i386.exports b/config/IOKit.i386.exports
index 8dff01adc671ab63b95fea5362610a69fd6cc4c6..77389f47a728ae9a384be3d761df88945d10616a 100644
@@ -399,7 +399,6 @@ __ZN8IOMapper10iovmInsertEjmP13upl_page_infom
 __ZN8IOMapper10iovmInsertEjmPjm
 __ZN8IOMapper11NewARTTableEmPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatam
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev
diff --git a/config/IOKit.x86_64.exports b/config/IOKit.x86_64.exports
index 37e7e8d41162d3fa486a9cd79730571888d5e117..5c2aa9a4928ef717a3e90e9d218032cd506ae501 100644
@@ -362,7 +362,6 @@ __ZN8IOMapper10iovmInsertEjjP13upl_page_infoj
 __ZN8IOMapper10iovmInsertEjjPjj
 __ZN8IOMapper11NewARTTableEyPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatay
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev
diff --git a/config/MasterVersion b/config/MasterVersion
index 7b10c32ce11ea6346373f64a7f8e703894c3f6f7..02b81f57c2a3a0c04275c75a403b08fc4b4e1214 100644
@@ -1,4 +1,4 @@
-12.1.0
+12.2.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
diff --git a/config/Private.exports b/config/Private.exports
index 95fe92e41b49c548080911edab1b941f3d9a92d7..3bfacfc397118f669583591c6148fbd4f3b6d117 100644
@@ -114,7 +114,7 @@ _net_add_proto
 _net_del_domain
 _net_del_proto
 _netboot_root
-_perf_monitor_register
+_perf_monitor_register_*
 _perf_monitor_unregister
 _pffinddomain
 _pffindproto
diff --git a/config/System6.0.exports b/config/System6.0.exports
index 9b4585b337ac700f6c0068776b1bda15a7e8f28c..2749c3ad20a2711cbefe67a84d65195ea7bd84a7 100644
@@ -1812,7 +1812,6 @@ __ZN8IOMapper10superClassE
 __ZN8IOMapper11NewARTTableEmPPvPj
 __ZN8IOMapper12FreeARTTableEP6OSDatam
 __ZN8IOMapper17setMapperRequiredEb
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
 __ZN8IOMapper18_RESERVEDIOMapper4Ev
 __ZN8IOMapper18_RESERVEDIOMapper5Ev
 __ZN8IOMapper18_RESERVEDIOMapper6Ev
diff --git a/config/Unsupported.exports b/config/Unsupported.exports
index dc1590d45ac52f9cd99308058fd652ed004c15d4..b4087719b642eac20cac4d1249ee1b36ed104e6b 100644
@@ -8,6 +8,7 @@ _KUNCUserNotificationDisplayNotice
 _NDR_record
 _PE_kputc
 __Z22OSFlushObjectTrackListv
+__ZN11IOMemoryMap9wireRangeEjyy
 __ZN15IOWatchDogTimer10gMetaClassE
 __ZN15IOWatchDogTimer10superClassE
 __ZN15IOWatchDogTimer13setPropertiesEP8OSObject
@@ -128,7 +129,15 @@ _mig_user_allocate
 _mig_user_deallocate
 _ml_io_map
 _ml_phys_read
+_ml_phys_read_byte_64
+_ml_phys_read_double_64
+_ml_phys_read_half_64
+_ml_phys_read_word_64
 _ml_phys_write
+_ml_phys_write_byte_64
+_ml_phys_write_double_64
+_ml_phys_write_half_64
+_ml_phys_write_word_64
 _ml_probe_read
 _ml_processor_register
 _ml_thread_policy
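
The new _ml_phys_*_64 exports add byte/half/word/double accessors that take full 64-bit physical addresses. A hedged usage sketch, with prototypes assumed from osfmk's machine-routines headers:

    #include <mach/vm_types.h>      /* addr64_t (assumed location) */

    extern unsigned int ml_phys_read_word_64(addr64_t paddr64);
    extern void         ml_phys_write_word_64(addr64_t paddr64, unsigned int data);

    /* read-modify-write a 32-bit word above the 4GB line */
    static void set_low_bit_at_phys(addr64_t pa)
    {
        unsigned int val = ml_phys_read_word_64(pa);
        ml_phys_write_word_64(pa, val | 0x1);
    }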
index f5d5040617d3aa15a8a2f26bf09aa7975c740c35..fdb0a398cf89b8bfad55e08b838b3283d7934c48 100644 (file)
@@ -34,6 +34,7 @@ enum {
     kIOMemoryPhysicallyContiguous      = 0x00000010,
     kIOMemoryPageable                  = 0x00000020,
     kIOMemoryPurgeable                 = 0x00000040,
+    kIOMemoryHostPhysicallyContiguous          = 0x00000080,
     kIOMemorySharingTypeMask           = 0x000f0000,
     kIOMemoryUnshared                  = 0x00000000,
     kIOMemoryKernelUserShared          = 0x00010000,
@@ -44,9 +45,11 @@ enum {
 #endif
                                        | kIOMemoryThreadSafe
                                        | kIOMemoryClearEncrypt
+                                       | kIOMemoryMapperNone
 };
 
-#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_   1
+#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_           1
+#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_    1
 /*!
     @class IOBufferMemoryDescriptor
     @abstract Provides a simple memory descriptor that allocates its own buffer memory.
index 6e758273de6d4fda07833584ccef2cc5c0c7a046..2fdcb9dbb53872df0ea530ae3a7a004285d6ba43 100644 (file)
@@ -264,6 +264,7 @@ IOReturn IOHibernateSystemWake(void);
 IOReturn IOHibernateSystemPostWake(void);
 bool     IOHibernateWasScreenLocked(void);
 void     IOHibernateSetScreenLocked(uint32_t lockState);
+void     IOHibernateSystemRestart(void);
 
 #endif /* __cplusplus */
 
@@ -302,7 +303,8 @@ hibernate_setup(IOHibernateImageHeader * header,
                         boolean_t * encryptedswap);
 kern_return_t 
 hibernate_teardown(hibernate_page_list_t * page_list,
-                    hibernate_page_list_t * page_list_wired);
+                    hibernate_page_list_t * page_list_wired,
+                    hibernate_page_list_t * page_list_pal);
 
 kern_return_t 
 hibernate_processor_setup(IOHibernateImageHeader * header);
@@ -464,7 +466,10 @@ enum {
 #define kIOHibernateRTCVariablesKey    "IOHibernateRTCVariables"
 #define kIOHibernateSMCVariablesKey    "IOHibernateSMCVariables"
 
-#define kIOHibernateBootSwitchVarsKey                  "boot-switch-vars"
+#define kIOHibernateBootSwitchVarsKey  "boot-switch-vars"
+
+#define kIOHibernateBootNoteKey                "boot-note"
+
 
 #define kIOHibernateUseKernelInterpreter    0x80000000
 
index d877974ef49a84b0f4b45e7285d43896b93fd02e..55356c47019e552506968085a9dcfaf227fce8ad 100644 (file)
@@ -55,9 +55,9 @@ __END_DECLS
 
 #include <IOKit/IOService.h>
 #include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IODMACommand.h>
 
 class OSData;
-class IODMACommand;
 
 extern const OSSymbol * gIOMapperIDKey;
 
@@ -129,12 +129,20 @@ public:
     virtual ppnum_t iovmAllocDMACommand(IODMACommand * command, IOItemCount pageCount);
     virtual void iovmFreeDMACommand(IODMACommand * command, ppnum_t addr, IOItemCount pageCount);
     
+    virtual ppnum_t iovmMapMemory(
+                         OSObject                    * memory,   // dma command or iomd
+                         ppnum_t                       offsetPage,
+                         ppnum_t                       pageCount,
+                         uint32_t                      options,
+                         upl_page_info_t             * pageList,
+                         const IODMAMapSpecification * mapSpecification);
+
     OSMetaClassDeclareReservedUsed(IOMapper, 0);
     OSMetaClassDeclareReservedUsed(IOMapper, 1);
     OSMetaClassDeclareReservedUsed(IOMapper, 2);
+    OSMetaClassDeclareReservedUsed(IOMapper, 3);
 
 private:
-    OSMetaClassDeclareReservedUnused(IOMapper, 3);
     OSMetaClassDeclareReservedUnused(IOMapper, 4);
     OSMetaClassDeclareReservedUnused(IOMapper, 5);
     OSMetaClassDeclareReservedUnused(IOMapper, 6);
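
IOMapper's reserved slot 3 is now spent on iovmMapMemory(), which maps a whole page run in one call and hands the mapper the caller's IODMAMapSpecification. A hedged C++ sketch of a subclass override; MyMapper and its table helpers are hypothetical:

    class MyMapper : public IOMapper
    {
        OSDeclareDefaultStructors(MyMapper)
    public:
        virtual ppnum_t iovmMapMemory(OSObject * memory, ppnum_t offsetPage,
                                      ppnum_t pageCount, uint32_t options,
                                      upl_page_info_t * pageList,
                                      const IODMAMapSpecification * mapSpec);
    };

    ppnum_t MyMapper::iovmMapMemory(OSObject * memory, ppnum_t offsetPage,
                                    ppnum_t pageCount, uint32_t options,
                                    upl_page_info_t * pageList,
                                    const IODMAMapSpecification * mapSpec)
    {
        // honor alignment / numAddressBits constraints from mapSpec, then
        // enter each page into the I/O address table (helpers hypothetical)
        ppnum_t iovmPage = allocTableRange(pageCount, mapSpec);
        if (!iovmPage) return 0;
        for (ppnum_t i = 0; i < pageCount; i++)
            enterTableEntry(iovmPage + i, pageList[i].phys_addr);
        return iovmPage;
    }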
index fd83d0547177eda8c17ad574bda985e3943902f9..a44ce6f40cb6da6e43a9734fc4a071de7b3d9a94 100644 (file)
@@ -38,6 +38,7 @@
 
 class IOMemoryMap;
 class IOMapper;
+class IOService;
 
 /*
  * Direction of transfer, with respect to the described memory.
@@ -83,7 +84,8 @@ enum {
 
     kIOMemoryAsReference       = 0x00000100,
     kIOMemoryBufferPageable    = 0x00000400,
-    kIOMemoryMapperNone                = 0x00000800,
+    kIOMemoryMapperNone                = 0x00000800,   // Shared with Buffer MD
+    kIOMemoryHostOnly           = 0x00001000,   // Never DMA accessible
 #ifdef XNU_KERNEL_PRIVATE
     kIOMemoryRedirected                = 0x00004000,
     kIOMemoryPreparedReadOnly  = 0x00008000,
@@ -116,6 +118,26 @@ enum
 
 #define        IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND  1
 
+struct IODMAMapSpecification
+{
+       uint64_t    alignment;
+       IOService * device;
+       uint32_t    options;
+       uint8_t     numAddressBits;
+       uint8_t     resvA[3];
+       uint32_t    resvB[4];
+};
+
+enum
+{
+    kIODMAMapWriteAccess          = 0x00000002,
+    kIODMAMapPhysicallyContiguous = 0x00000010,
+    kIODMAMapDeviceMemory         = 0x00000020,
+    kIODMAMapPagingPath           = 0x00000040,
+    kIODMAMapIdentityMap          = 0x00000080,
+};
+
+
 enum 
 {
     kIOPreparationIDUnprepared = 0,
@@ -233,6 +255,13 @@ typedef IOOptionBits DMACommandOps;
 
 #ifdef XNU_KERNEL_PRIVATE
     IOMemoryDescriptorReserved * getKernelReserved( void );
+    IOReturn dmaMap(
+       IOMapper                    * mapper,
+       const IODMAMapSpecification * mapSpec,
+       uint64_t                      offset,
+       uint64_t                      length,
+       uint64_t                    * address,
+       ppnum_t                     * mapPages);
 #endif
        
 private:
@@ -763,6 +792,11 @@ public:
     IOReturn userClientUnmap();
 #endif /* XNU_KERNEL_PRIVATE */
 
+    IOReturn wireRange(
+       uint32_t                options,
+        mach_vm_size_t         offset,
+        mach_vm_size_t         length);
+
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
     OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
@@ -830,6 +864,19 @@ public:
 
     virtual uint64_t getPreparationID( void );
 
+#ifdef XNU_KERNEL_PRIVATE
+    // Internal APIs may be made virtual at some time in the future.
+    IOReturn wireVirtual(IODirection forDirection);
+    IOReturn dmaMap(
+       IOMapper                    * mapper,
+       const IODMAMapSpecification * mapSpec,
+       uint64_t                      offset,
+       uint64_t                      length,
+       uint64_t                    * address,
+       ppnum_t                     * mapPages);
+    bool initMemoryEntries(size_t size, IOMapper * mapper);
+#endif
+
 private:
 
 #ifndef __LP64__
@@ -838,8 +885,6 @@ private:
     virtual void unmapFromKernel();
 #endif /* !__LP64__ */
 
-    // Internal APIs may be made virtual at some time in the future.
-    IOReturn wireVirtual(IODirection forDirection);
     void *createNamedEntry();
 
     // Internal
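
IODMAMapSpecification lets a memory descriptor carry per-device DMA constraints down to the mapper, and the kernel-private dmaMap() declared above consumes it directly. A hedged sketch of mapping a descriptor for a 32-bit DMA engine (md is assumed to be a prepared IOMemoryDescriptor; XNU_KERNEL_PRIVATE only):

    IODMAMapSpecification spec;
    bzero(&spec, sizeof(spec));
    spec.numAddressBits = 32;                  // device can only address 4GB
    spec.alignment      = page_size;           // page-aligned windows
    spec.options        = kIODMAMapWriteAccess;

    uint64_t iovmAddr = 0;
    ppnum_t  mapPages = 0;
    IOReturn ret = md->dmaMap(IOMapper::gSystem, &spec,
                              0, md->getLength(), &iovmAddr, &mapPages);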
index cfcbf6d55d86d5fa250c705e36c906033da6b60f..3401744c4e60fc53c7a03a7ad192c604e163e86e 100644 (file)
@@ -245,7 +245,7 @@ enum {
  *  false       == Retain FV key when going to standby mode
  *  not present == Retain FV key when going to standby mode
  */
-#define kIOPMDestroyFVKeyOnStandbyKey            "DestroyFVKeyOnStandby"
+#define kIOPMDestroyFVKeyOnStandbyKey       "DestroyFVKeyOnStandby"
 
 /*******************************************************************************
  *
@@ -288,7 +288,15 @@ enum {
      */
     kIOPMDriverAssertionPreventDisplaySleepBit      = 0x40,
 
-    kIOPMDriverAssertionReservedBit7                = 0x80
+    /*! kIOPMDriverAssertionReservedBit7
+     * Reserved for storage family.
+     */
+    kIOPMDriverAssertionReservedBit7                = 0x80,
+
+    /*! kIOPMDriverAssertionMagicPacketWakeEnabledBit
+     * When set, driver is informing PM that magic packet wake is enabled.
+     */
+    kIOPMDriverAssertionMagicPacketWakeEnabledBit   = 0x100
 };
 
  /* kIOPMAssertionsDriverKey
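
The new assertion bit lets a network driver advertise, through the existing driver-assertion machinery, that magic packet wake is armed; the sleep policy code folds this into kIOPMSleepFactorMagicPacketWakeEnabled (see the IOPMPrivate.h hunks below). A hedged sketch, with createPMAssertion's signature assumed from RootDomain.h:

    /* from an IOService subclass; the owner string is hypothetical */
    IOPMDriverAssertionID assertionID =
        getPMRootDomain()->createPMAssertion(
            kIOPMDriverAssertionMagicPacketWakeEnabledBit,
            kIOPMDriverAssertionLevelOn,
            this, "com.example.ethernet");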
index 4828f38cdd50850c28fd5ffc62dd07229b82e5a6..651544913f5714c13842c1acaa1d0234b4d1acff 100644 (file)
 /* @constant kIOPMTimelineDictionaryKey
  * @abstract RootDomain key for dictionary describing Timeline's info
  */
-#define     kIOPMTimelineDictionaryKey                  "PMTimelineLogging"
+#define kIOPMTimelineDictionaryKey              "PMTimelineLogging"
 
 /* @constant kIOPMTimelineEnabledKey
  * @abstract Boolean value indicating whether the system is recording PM events.
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineEnabledKey                     "TimelineEnabled"
+#define kIOPMTimelineEnabledKey                 "TimelineEnabled"
 
 /* @constant kIOMPTimelineSystemNumberTrackedKey
  * @abstract The maximum number of system power events the system may record.
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineSystemNumberTrackedKey         "TimelineSystemEventsTracked"
+#define kIOPMTimelineSystemNumberTrackedKey     "TimelineSystemEventsTracked"
 
 /* @constant kIOPMTimelineSystemBufferSizeKey
  * @abstract Size in bytes  of buffer recording system PM events
  * @discussion Key may be found in the dictionary at IOPMrootDomain's property 
  * kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
  */
-#define     kIOPMTimelineSystemBufferSizeKey            "TimelineSystemBufferSize"
+#define kIOPMTimelineSystemBufferSizeKey        "TimelineSystemBufferSize"
 
 
 
@@ -620,31 +620,177 @@ enum {
 #define kIOPMSleepWakeFailureUUIDKey        "UUID"
 #define kIOPMSleepWakeFailureDateKey        "Date"
 
-/******************************************************************************/
-/* System sleep policy
- * Shared between PM root domain and platform driver.
+/*****************************************************************************
+ *
+ * Root Domain private property keys
+ *
+ *****************************************************************************/
+
+/* kIOPMAutoPowerOffEnabledKey
+ * Indicates if Auto Power Off is enabled.
+ * It has a boolean value.
+ *  true        == Auto Power Off is enabled
+ *  false       == Auto Power Off is disabled
+ *  not present == Auto Power Off is not supported on this hardware
  */
+#define kIOPMAutoPowerOffEnabledKey         "AutoPowerOff Enabled"
 
-// Platform specific property added by the platform driver.
-// An OSData that describes the system sleep policy.
-#define kIOPlatformSystemSleepPolicyKey     "IOPlatformSystemSleepPolicy"
+/* kIOPMAutoPowerOffDelayKey
+ * Key refers to a CFNumberRef that represents the delay in seconds before
+ * entering the Auto Power Off state.  The property is not present if Auto
+ * Power Off is unsupported.
+ */
+#define kIOPMAutoPowerOffDelayKey           "AutoPowerOff Delay"
 
-// Root domain property updated before platform sleep.
-// An OSData that describes the system sleep parameters.
-#define kIOPMSystemSleepParametersKey       "IOPMSystemSleepParameters"
+/*****************************************************************************
+ *
+ * System Sleep Policy
+ *
+ *****************************************************************************/
 
-struct IOPMSystemSleepParameters
+#define kIOPMSystemSleepPolicySignature     0x54504c53
+#define kIOPMSystemSleepPolicyVersion       2
+
+/*!
+ * @defined kIOPMSystemSleepTypeKey
+ * @abstract Indicates the type of system sleep.
+ * @discussion An OSNumber property of root domain that describes the type
+ * of system sleep. This property is set after notifying priority sleep/wake
+ * clients, but before informing interested drivers and shutting down power
+ * plane drivers.
+ */
+#define kIOPMSystemSleepTypeKey             "IOPMSystemSleepType"
+
+struct IOPMSystemSleepPolicyVariables
 {
-    uint32_t    version;
-    uint32_t    sleepFlags;
-    uint32_t    sleepTimer;
-    uint32_t    wakeEvents;
+    uint32_t    signature;                  // kIOPMSystemSleepPolicySignature
+    uint32_t    version;                    // kIOPMSystemSleepPolicyVersion
+
+    uint64_t    currentCapability;          // current system capability bits
+    uint64_t    highestCapability;          // highest system capability bits
+
+    uint64_t    sleepFactors;               // sleep factor bits
+    uint32_t    sleepReason;                // kIOPMSleepReason*
+    uint32_t    sleepPhase;                 // identify the sleep phase
+    uint32_t    hibernateMode;              // current hibernate mode
+
+    uint32_t    standbyDelay;               // standby delay in seconds
+    uint32_t    poweroffDelay;              // auto-poweroff delay in seconds
+    uint32_t    scheduledAlarms;            // bitmask of scheduled alarm types
+
+    uint32_t    reserved[50];               // pad struct size to 256 bytes
+};
+
+enum {
+    kIOPMAlarmBitDebugWake                  = 0x01,
+    kIOPMAlarmBitCalendarWake               = 0x02,
+    kIOPMAlarmBitMaintenanceWake            = 0x04,
+    kIOPMAlarmBitSleepServiceWake           = 0x08
 };
 
-// Sleep flags
 enum {
-    kIOPMSleepFlagHibernate         = 0x00000001,
-    kIOPMSleepFlagSleepTimerEnable  = 0x00000002
+    kIOPMSleepPhase1 = 1,
+    kIOPMSleepPhase2
 };
 
+// Sleep Factor Mask / Bits
+enum {
+    kIOPMSleepFactorSleepTimerWake          = 0x00000001ULL,
+    kIOPMSleepFactorLidOpen                 = 0x00000002ULL,
+    kIOPMSleepFactorACPower                 = 0x00000004ULL,
+    kIOPMSleepFactorBatteryLow              = 0x00000008ULL,
+    kIOPMSleepFactorStandbyNoDelay          = 0x00000010ULL,
+    kIOPMSleepFactorStandbyForced           = 0x00000020ULL,
+    kIOPMSleepFactorStandbyDisabled         = 0x00000040ULL,
+    kIOPMSleepFactorUSBExternalDevice       = 0x00000080ULL,
+    kIOPMSleepFactorBluetoothHIDDevice      = 0x00000100ULL,
+    kIOPMSleepFactorExternalMediaMounted    = 0x00000200ULL,
+    kIOPMSleepFactorThunderboltDevice       = 0x00000400ULL,
+    kIOPMSleepFactorRTCAlarmScheduled       = 0x00000800ULL,
+    kIOPMSleepFactorMagicPacketWakeEnabled  = 0x00001000ULL,
+    kIOPMSleepFactorHibernateForced         = 0x00010000ULL,
+    kIOPMSleepFactorAutoPowerOffDisabled    = 0x00020000ULL,
+    kIOPMSleepFactorAutoPowerOffForced      = 0x00040000ULL,
+    kIOPMSleepFactorExternalDisplay         = 0x00080000ULL
+};
+
+// System Sleep Types
+enum {
+    kIOPMSleepTypeInvalid                   = 0,
+    kIOPMSleepTypeAbortedSleep              = 1,
+    kIOPMSleepTypeNormalSleep               = 2,
+    kIOPMSleepTypeSafeSleep                 = 3,
+    kIOPMSleepTypeHibernate                 = 4,
+    kIOPMSleepTypeStandby                   = 5,
+    kIOPMSleepTypePowerOff                  = 6,
+    kIOPMSleepTypeLast                      = 7
+};
+
+// System Sleep Flags
+enum {
+    kIOPMSleepFlagDisableHibernateAbort     = 0x00000001,
+    kIOPMSleepFlagDisableUSBWakeEvents      = 0x00000002,
+    kIOPMSleepFlagDisableBatlowAssertion    = 0x00000004
+};
+
+// System Wake Events
+enum {
+    kIOPMWakeEventLidOpen                   = 0x00000001,
+    kIOPMWakeEventLidClose                  = 0x00000002,
+    kIOPMWakeEventACAttach                  = 0x00000004,
+    kIOPMWakeEventACDetach                  = 0x00000008,
+    kIOPMWakeEventCDInsert                  = 0x00000010,
+    kIOPMWakeEventCDEject                   = 0x00000020,
+    kIOPMWakeEventHPDAttach                 = 0x00000040,
+    kIOPMWakeEventHPDDetach                 = 0x00000080,
+    kIOPMWakeEventPowerButton               = 0x00000100,
+    kIOPMWakeEventG3PowerOn                 = 0x00000200,
+    kIOPMWakeEventUserPME                   = 0x00000400,
+    kIOPMWakeEventSleepTimer                = 0x00000800,
+    kIOPMWakeEventBatteryLow                = 0x00001000,
+    kIOPMWakeEventDarkPME                   = 0x00002000
+};
+
+/*!
+ * @defined kIOPMSystemSleepParametersKey
+ * @abstract Sleep parameters describing the upcoming sleep
+ * @discussion Root domain updates this OSData property before system sleep
+ * to pass sleep parameters to the platform driver.  Some of the parameters
+ * are based on the chosen entry in the system sleep policy table.
+ */
+#define kIOPMSystemSleepParametersKey       "IOPMSystemSleepParameters"
+#define kIOPMSystemSleepParametersVersion   2
+
+struct IOPMSystemSleepParameters
+{
+    uint16_t    version;
+    uint16_t    reserved1;
+    uint32_t    sleepType;
+    uint32_t    sleepFlags;
+    uint32_t    ecWakeEvents;
+    uint32_t    ecWakeTimer;
+    uint32_t    ecPoweroffTimer;
+    uint32_t    reserved2[10];
+} __attribute__((packed));
+
+#if defined(KERNEL) && defined(__cplusplus)
+
+/*!
+ * @defined kIOPMInstallSystemSleepPolicyHandlerKey
+ * @abstract Name of the platform function to install a sleep policy handler.
+ * @discussion Pass to IOPMrootDomain::callPlatformFunction(), with a pointer
+ * to the C-function handler at param1, and an optional target at param2, to
+ * register a sleep policy handler. Only a single sleep policy handler can
+ * be installed.
+ */
+#define kIOPMInstallSystemSleepPolicyHandlerKey        \
+        "IOPMInstallSystemSleepPolicyHandler"
+
+typedef IOReturn (*IOPMSystemSleepPolicyHandler)(
+        void * target,
+        const IOPMSystemSleepPolicyVariables * vars,
+        IOPMSystemSleepParameters * params );
+
+#endif /* KERNEL */
+
 #endif /* ! _IOKIT_IOPMPRIVATE_H */
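
The handler contract above: the platform driver registers a single C function, root domain calls it with the policy variables (capabilities, sleep factors, delays), and the handler fills in the IOPMSystemSleepParameters passed to the platform before sleep. A hedged sketch of installing and implementing such a handler; the policy itself is purely illustrative:

    static IOReturn
    mySleepPolicyHandler(void * target,
                         const IOPMSystemSleepPolicyVariables * vars,
                         IOPMSystemSleepParameters * params)
    {
        params->version = kIOPMSystemSleepParametersVersion;
        /* illustrative policy: standby unless a factor forbids it */
        if (vars->sleepFactors & kIOPMSleepFactorStandbyDisabled)
            params->sleepType = kIOPMSleepTypeNormalSleep;
        else
            params->sleepType = kIOPMSleepTypeStandby;
        return kIOReturnSuccess;
    }

    /* in the platform driver's start(); target (param2) is optional */
    getPMRootDomain()->callPlatformFunction(
        kIOPMInstallSystemSleepPolicyHandlerKey, false,
        (void *) &mySleepPolicyHandler, this, NULL, NULL);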
index 9a514bdbca7a86f731b2a87048b6b6ae82b0b4a0..95dbc96a51ef8351a327dc5a81c68ee1d3f31768 100644 (file)
@@ -631,7 +631,7 @@ private:
     IONotifier *            systemCapabilityNotifier;
 
     IOPMTimeline            *timeline;
-    
+
     typedef struct {
         uint32_t            pid;
         uint32_t            refcount;
@@ -644,6 +644,8 @@ private:
     OSSet *                 preventIdleSleepList;
     OSSet *                 preventSystemSleepList;
 
+    UInt32                  _scheduledAlarms;
+
 #if HIBERNATION
     clock_sec_t             _standbyTimerResetSeconds;
 #endif
@@ -704,7 +706,7 @@ private:
 
 #if HIBERNATION
     bool        getSleepOption( const char * key, uint32_t * option );
-    bool        evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int sleepPhase );
+    bool        evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int phase );
     void        evaluateSystemSleepPolicyEarly( void );
     void        evaluateSystemSleepPolicyFinal( void );
 #endif /* HIBERNATION */
index a56b469ee999d7373044d28e131688e0fad6272b..377dc21296bf17fab3ff63a368080ea4b2a74674 100644 (file)
 #include <IOKit/IOMapper.h>
 #include <IOKit/IOBufferMemoryDescriptor.h>
 #include <libkern/OSDebug.h>
+#include <mach/mach_vm.h>
 
 #include "IOKitKernelInternal.h"
 
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+       IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
+
 __BEGIN_DECLS
 void ipc_port_release_send(ipc_port_t port);
 #include <vm/pmap.h>
@@ -48,9 +64,209 @@ __END_DECLS
 
 enum
 {
-    kInternalFlagPhysical  = 0x00000001,
-    kInternalFlagPageSized = 0x00000002
+    kInternalFlagPhysical      = 0x00000001,
+    kInternalFlagPageSized     = 0x00000002,
+    kInternalFlagPageAllocated = 0x00000004
+};
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if 0
+#undef assert
+#define assert(ex)  \
+       ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
+#endif
+
+enum
+{
+    kIOPageAllocChunkBytes = (PAGE_SIZE / 64),
+    kIOPageAllocSignature  = 'iopa'
+};
+
+struct io_pagealloc_t
+{
+    queue_chain_t link;
+    uint64_t      avail;
+    uint32_t      signature;
 };
+typedef struct io_pagealloc_t io_pagealloc_t;
+
+typedef char io_pagealloc_t_assert[(sizeof(io_pagealloc_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
+
+IOSimpleLock * gIOPageAllocLock;
+queue_head_t   gIOPageAllocList;
+vm_size_t      gIOPageAllocCount;
+vm_size_t      gIOPageAllocBytes;
+
+static io_pagealloc_t * 
+iopa_allocpage(void)
+{
+    kern_return_t    kr;
+    io_pagealloc_t * pa;
+    vm_address_t     vmaddr = 0;
+
+    int options = 0; // KMA_LOMEM;
+    kr = kernel_memory_allocate(kernel_map, &vmaddr,
+                               page_size, 0, options);
+    if (KERN_SUCCESS != kr) return (0);
+
+    bzero((void *) vmaddr, page_size);
+    pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);
+
+    pa->signature = kIOPageAllocSignature;
+    pa->avail     = -2ULL;
+
+    return (pa);
+}
+
+static void 
+iopa_freepage(io_pagealloc_t * pa)
+{
+    kmem_free( kernel_map, trunc_page((uintptr_t) pa), page_size);
+}
+
+static uintptr_t
+iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
+{
+    uint32_t n, s;
+    uint64_t avail = pa->avail;
+
+    assert(avail);
+
+    // find strings of count 1 bits in avail
+    for (n = count; n > 1; n -= s)
+    {
+       s = n >> 1;
+       avail = avail & (avail << s);
+    }
+    // and aligned
+    avail &= align;
+
+    if (avail)
+    {
+       n = __builtin_clzll(avail);
+       pa->avail &= ~((-1ULL << (64 - count)) >> n);
+       if (!pa->avail && pa->link.next)
+       {
+           remque(&pa->link);
+           pa->link.next = 0;
+       }
+       return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+    }
+
+    return (0);
+}
+
+static uint32_t 
+log2up(uint32_t size)
+{
+    if (size <= 1) size = 0;
+    else size = 32 - __builtin_clz(size - 1);
+    return (size);
+}
+
+static uintptr_t 
+iopa_alloc(vm_size_t bytes, uint32_t balign)
+{
+    static const uint64_t align_masks[] = {
+       0xFFFFFFFFFFFFFFFF,
+       0xAAAAAAAAAAAAAAAA,
+       0x8888888888888888,
+       0x8080808080808080,
+       0x8000800080008000,
+       0x8000000080000000,
+       0x8000000000000000,
+    };
+    io_pagealloc_t * pa;
+    uintptr_t        addr = 0;
+    uint32_t         count;
+    uint64_t         align;
+
+    if (!bytes) bytes = 1;
+    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];
+
+    IOSimpleLockLock(gIOPageAllocLock);
+    pa = (typeof(pa)) queue_first(&gIOPageAllocList);
+    while (!queue_end(&gIOPageAllocList, &pa->link))
+    {
+       addr = iopa_allocinpage(pa, count, align);
+       if (addr)
+       {
+           gIOPageAllocBytes += bytes;
+           break;
+       }
+       pa = (typeof(pa)) queue_next(&pa->link);
+    }
+    IOSimpleLockUnlock(gIOPageAllocLock);
+    if (!addr)
+    {
+        pa = iopa_allocpage();
+       if (pa)
+       {
+           addr = iopa_allocinpage(pa, count, align);
+           IOSimpleLockLock(gIOPageAllocLock);
+           if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
+           gIOPageAllocCount++;
+           if (addr) gIOPageAllocBytes += bytes;
+           IOSimpleLockUnlock(gIOPageAllocLock);
+       }
+    }
+
+    if (addr)
+    {
+        assert((addr & ((1 << log2up(balign)) - 1)) == 0);
+       IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
+#if IOALLOCDEBUG
+       debug_iomalloc_size += bytes;
+#endif
+    }
+
+    return (addr);
+}
+
+static void 
+iopa_free(uintptr_t addr, vm_size_t bytes)
+{
+    io_pagealloc_t * pa;
+    uint32_t         count;
+    uintptr_t        chunk;
+
+    if (!bytes) bytes = 1;
+
+    chunk = (addr & page_mask);
+    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));
+
+    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
+    assert(kIOPageAllocSignature == pa->signature);
+
+    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+    chunk /= kIOPageAllocChunkBytes;
+
+    IOSimpleLockLock(gIOPageAllocLock);
+    if (!pa->avail)
+    {
+       assert(!pa->link.next);
+       enqueue_tail(&gIOPageAllocList, &pa->link);
+    }
+    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
+    if (pa->avail != -2ULL) pa = 0;
+    else
+    {
+        remque(&pa->link);
+        pa->link.next = 0;
+        pa->signature = 0;
+       gIOPageAllocCount--;
+    }
+    gIOPageAllocBytes -= bytes;
+    IOSimpleLockUnlock(gIOPageAllocLock);
+    if (pa) iopa_freepage(pa);
+
+#if IOALLOCDEBUG
+    debug_iomalloc_size -= bytes;
+#endif
+    IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
+}
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
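
The allocator above carves each wired page into 64 chunks of kIOPageAllocChunkBytes (PAGE_SIZE/64), keeps the io_pagealloc_t header in the page's last chunk (hence avail = -2ULL: bit 0, the last chunk, starts out busy), and tracks free chunks in a 64-bit mask whose MSB stands for chunk 0. Free runs are found by repeatedly ANDing the mask with shifted copies of itself; a standalone, hedged demo of that trick:

    #include <stdint.h>
    #include <stdio.h>

    /* Same loop as iopa_allocinpage(): after the loop, a set bit at
       MSB-offset n means chunks n .. n+count-1 are all free. */
    static int find_run(uint64_t avail, uint32_t count)
    {
        uint32_t n, s;
        for (n = count; n > 1; n -= s) {
            s = n >> 1;
            avail &= (avail << s);
        }
        return avail ? __builtin_clzll(avail) : -1;
    }

    int main(void)
    {
        /* chunks 0-2 and 8-15 free (bit 63 = chunk 0) */
        uint64_t avail = (7ULL << 61) | (0xFFULL << 48);
        printf("%d\n", find_run(avail, 4));   /* prints 8 */
        return 0;
    }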
 
@@ -79,11 +295,14 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                mach_vm_address_t alignment,
                                mach_vm_address_t physicalMask)
 {
-    kern_return_t      kr;
-    task_t             mapTask = NULL;
-    vm_map_t           vmmap = NULL;
-    mach_vm_address_t   highestMask = 0;
-    IOOptionBits       iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+    kern_return_t        kr;
+    task_t               mapTask = NULL;
+    vm_map_t             vmmap = NULL;
+    mach_vm_address_t     highestMask = 0;
+    IOOptionBits         iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+    IODMAMapSpecification mapSpec;
+    bool                  mapped = false;
+    bool                  needZero;
 
     if (!capacity)
         return false;
@@ -99,12 +318,19 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
        return (false);
     _ranges.v64->address = 0;
     _ranges.v64->length  = 0;
-       //  make sure super::free doesn't dealloc _ranges before super::init
-       _flags = kIOMemoryAsReference;
+    //  make sure super::free doesn't dealloc _ranges before super::init
+    _flags = kIOMemoryAsReference;
 
     // Grab IOMD bits from the Buffer MD options
     iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);
 
+    if (!(kIOMemoryMapperNone & options))
+    {
+       IOMapper::checkForSystemMapper();
+       mapped = (0 != IOMapper::gSystem);
+    }
+    needZero = mapped;
+
     if (physicalMask && (alignment <= 1))
     {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
@@ -128,6 +354,18 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
     if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;
 
+    bzero(&mapSpec, sizeof(mapSpec));
+    mapSpec.alignment      = _alignment;
+    mapSpec.numAddressBits = 64;
+    if (highestMask && mapped)
+    {
+       if (highestMask <= 0xFFFFFFFF)
+           mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+       else
+           mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+       highestMask = 0;
+    }
+
     // set flags for entry + object create
     vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
 
@@ -183,7 +421,19 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
 
        /* Allocate a wired-down buffer inside kernel space. */
 
-       if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
+       bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
+       if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
+       {
+           contig |= (!mapped);
+           contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+           // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+           contig |= true;
+#endif
+       }
+
+       if (contig || highestMask || (alignment > page_size))
        {
             _internalFlags |= kInternalFlagPhysical;
             if (highestMask)
@@ -191,8 +441,15 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                 _internalFlags |= kInternalFlagPageSized;
                 capacity = round_page(capacity);
             }
-            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment, 
-                                        (0 != (options & kIOMemoryPhysicallyContiguous)));
+            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+                                       capacity, highestMask, alignment, contig);
+       }
+       else if (needZero
+                 && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+       {
+            _internalFlags |= kInternalFlagPageAllocated;
+            needZero        = false;
+            _buffer         = (void *) iopa_alloc(capacity, alignment);
        }
        else if (alignment > 1)
        {
@@ -202,11 +459,11 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
        {
             _buffer = IOMalloc(capacity);
        }
-
        if (!_buffer)
        {
             return false;
        }
+       if (needZero) bzero(_buffer, capacity);
     }
 
     if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
@@ -245,7 +502,7 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
            {
                *startAddr;
                startAddr += page_size;
-           }
+           }
        }
     }
 
@@ -256,6 +513,11 @@ bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;
 
+    // give any system mapper the allocation params
+    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, 
+                                               &mapSpec, sizeof(mapSpec)))
+       return false;
+
     if (mapTask)
     {
        if (!reserved) {
@@ -461,16 +723,24 @@ void IOBufferMemoryDescriptor::free()
     }
     else if (buffer)
     {
-        if (internalFlags & kInternalFlagPhysical)
+       if (kInternalFlagPageSized & internalFlags) size = round_page(size);
+
+        if (kInternalFlagPhysical & internalFlags)
         {
-            if (kInternalFlagPageSized & internalFlags)
-                size = round_page(size);
             IOKernelFreePhysical((mach_vm_address_t) buffer, size);
-        }
+       }
+       else if (kInternalFlagPageAllocated & internalFlags)
+       {
+            iopa_free((uintptr_t) buffer, size);
+       }
         else if (alignment > 1)
+       {
             IOFreeAligned(buffer, size);
+       }
         else
+       {
             IOFree(buffer, size);
+       }
     }
     if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
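
With the new flags, clients can request host-physical contiguity explicitly, while plain kIOMemoryPhysicallyContiguous on a mapped system now only has to be contiguous in I/O space. A hedged usage sketch; all values are illustrative:

    /* host-contiguous, 32-bit-reachable buffer */
    IOBufferMemoryDescriptor * bmd =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task,
            kIODirectionOutIn | kIOMemoryHostPhysicallyContiguous,
            65536,                     /* capacity */
            0x00000000FFFFF000ULL);    /* page-aligned 32-bit mask */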
index dadc4dc3546e4a6de06435ce58c83d621c1aedc3..1ae4e61ae9c6bbc6db025928c0a0d482b7c676c9 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <libkern/OSTypes.h>
 #include <libkern/OSByteOrder.h>
+#include <libkern/OSDebug.h>
 
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h>
@@ -41,7 +42,7 @@
 #include "IOKitKernelInternal.h"
 
 #define MAPTYPE(type)          ((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type)                (MAPTYPE(type) == kMapped)
+#define IS_MAPPED(type)                (MAPTYPE(type) != kBypassed)
 #define IS_BYPASSED(type)      (MAPTYPE(type) == kBypassed)
 #define IS_NONCOHERENT(type)   (MAPTYPE(type) == kNonCoherent)
 
@@ -147,6 +148,8 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    IOMapper       *mapper,
                                    void           *refCon)
 {
+    IOService * device = 0;
+
     if (!super::init() || !outSegFunc)
         return false;
 
@@ -168,6 +171,12 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
     if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1
 
+
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+       device = mapper;
+       mapper = 0;
+    }
     if (!mapper)
     {
         IOMapper::checkForSystemMapper();
@@ -190,7 +199,7 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
     switch (MAPTYPE(mappingOptions))
     {
     case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kNonCoherent: /*fMapper = 0;*/ break;
     case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
@@ -208,7 +217,8 @@ IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
     bzero(reserved, sizeof(IODMACommandInternal));
 
     fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-    
+    fInternalState->fDevice = device;
+
     return true;
 }
 
@@ -250,18 +260,15 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
 
     if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
-       err = mem->dmaCommandOperation(
-               kIOMDGetCharacteristics,
-               &fMDSummary, sizeof(fMDSummary));
+       err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+                                      &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;
 
        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
 
        if ((kMapped == MAPTYPE(fMappingOptions))
-           && fMapper 
-           && (!fNumAddressBits || (fNumAddressBits >= 31)))
-           // assuming mapped space is 2G
+           && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
@@ -272,10 +279,10 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar
 
        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
-               err = prepare();
-               if (err) {
-                       clearMemoryDescriptor();
-               }
+           err = prepare();
+           if (err) {
+               clearMemoryDescriptor();
+           }
        }
     }
        
@@ -321,7 +328,7 @@ IODMACommand::segmentOp(
 
     IODMACommandInternal * state = target->reserved;
 
-    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
+    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
     else
        maxPhys = 0;
@@ -387,9 +394,16 @@ IODMACommand::segmentOp(
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
+                   addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;
 
+                   if ((kMapped == MAPTYPE(target->fMappingOptions))
+                       && target->fMapper)
+                   {
+                       cpuAddr = target->fMapper->mapAddr(address);
+                   }
+       
                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
@@ -405,12 +419,12 @@ IODMACommand::segmentOp(
 
                    if (kWalkSyncIn & op)
                    { // cppvNoModSnk
-                       copypv(remapAddr, address, chunk,
+                       copypv(remapAddr, cpuAddr, chunk,
                                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
-                       copypv(address, remapAddr, chunk,
+                       copypv(cpuAddr, remapAddr, chunk,
                                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
@@ -436,13 +450,11 @@ IODMACommand::walkAll(UInt8 op)
 
     if (kWalkPreflight & op)
     {
-       state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
-       state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
@@ -469,6 +481,9 @@ IODMACommand::walkAll(UInt8 op)
            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;
+
+               if (fMapper) panic("fMapper copying");
+
                kr = vm_page_alloc_list(state->fCopyPageCount, 
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
@@ -507,19 +522,6 @@ IODMACommand::walkAll(UInt8 op)
                }
            }
        }
-
-       if (state->fLocalMapper)
-       {
-           state->fLocalMapperPageCount = atop_64(round_page(
-                   state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
-           state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
-            if (!state->fLocalMapperPageAlloc)
-            {
-                DEBG("IODMACommand !iovmAlloc");
-                return (kIOReturnNoResources);
-            }
-           state->fMapContig = true;
-       }
     }
 
     if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
@@ -564,12 +566,6 @@ IODMACommand::walkAll(UInt8 op)
 
     if (kWalkComplete & op)
     {
-       if (state->fLocalMapperPageAlloc)
-       {
-           fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
-           state->fLocalMapperPageAlloc = 0;
-           state->fLocalMapperPageCount = 0;
-       }
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
@@ -636,6 +632,11 @@ IODMACommand::prepareWithSpecification(SegmentFunction     outSegFunc,
     if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1
 
+    if (mapper && !OSDynamicCast(IOMapper, mapper))
+    {
+       fInternalState->fDevice = mapper;
+       mapper = 0;
+    }
     if (!mapper)
     {
         IOMapper::checkForSystemMapper();
@@ -645,7 +646,7 @@ IODMACommand::prepareWithSpecification(SegmentFunction      outSegFunc,
     switch (MAPTYPE(mappingOptions))
     {
     case kMapped:                   break;
-    case kNonCoherent: fMapper = 0; break;
+    case kNonCoherent:              break;
     case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
@@ -721,15 +722,14 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
        state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));
 
        state->fSourceAlignMask = fAlignMask;
-       if (state->fLocalMapper)
+       if (fMapper)
            state->fSourceAlignMask &= page_mask;
        
        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
-                           && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
-       
+
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
@@ -737,6 +737,45 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
+
+       if (fMapper)
+       {
+           if (state->fLocalMapper)
+           {
+               state->fLocalMapperPageCount = atop_64(round_page(
+                       state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
+               state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
+               if (!state->fLocalMapperPageAlloc)
+               {
+                   DEBG("IODMACommand !iovmAlloc");
+                   return (kIOReturnNoResources);
+               }
+               state->fMapContig = true;
+           }
+           else
+           {
+               IOMDDMAMapArgs mapArgs;
+               bzero(&mapArgs, sizeof(mapArgs));
+               mapArgs.fMapper = fMapper;
+               mapArgs.fMapSpec.device         = state->fDevice;
+               mapArgs.fMapSpec.alignment      = fAlignMask + 1;
+               mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+               mapArgs.fOffset = state->fPreparedOffset;
+               mapArgs.fLength = state->fPreparedLength;
+               const IOMemoryDescriptor * md = state->fCopyMD;
+               if (!md) md = fMemory;
+               ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+               if (kIOReturnSuccess == ret)
+               {
+                   state->fLocalMapperPageAlloc = mapArgs.fAlloc;
+                   state->fLocalMapperPageCount = mapArgs.fAllocCount;
+                   state->fMapContig = true;
+               }
+               ret = kIOReturnSuccess;
+           }
+       }
+
+
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
     }
@@ -761,6 +800,20 @@ IODMACommand::complete(bool invalidateCache, bool synchronize)
                        op |= kWalkSyncIn;
                ret = walkAll(op);
        }
+       if (state->fLocalMapperPageAlloc)
+       {
+           if (state->fLocalMapper)
+           {
+               fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+           }
+           else if (state->fLocalMapperPageCount)
+           {
+               fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+           }
+           state->fLocalMapperPageAlloc = 0;
+           state->fLocalMapperPageCount = 0;
+       }
+
        state->fPrepared = false;
 
        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
@@ -947,7 +1000,7 @@ IODMACommand::genIOVMSegments(uint32_t op,
        return kIOReturnBadArgument;
 
     IOMDDMAWalkSegmentArgs *state =
-       (IOMDDMAWalkSegmentArgs *) fState;
+       (IOMDDMAWalkSegmentArgs *)(void *) fState;
 
     UInt64 offset    = *offsetP + internalState->fPreparedOffset;
     UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
@@ -986,12 +1039,27 @@ IODMACommand::genIOVMSegments(uint32_t op,
            state->fOffset = offset;
            state->fLength = memLength - offset;
 
-           if (internalState->fMapContig && (kWalkClient & op))
+           if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
            {
-               ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
-               state->fIOVMAddr = ptoa_64(pageNum) 
-                                           + offset - internalState->fPreparedOffset;
+               state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
                rtn = kIOReturnSuccess;
+#if 0
+               {
+                   uint64_t checkOffset;
+                   IOPhysicalLength segLen;
+                   for (checkOffset = 0; checkOffset < state->fLength; )
+                   {
+                       addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+                       if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+                       {
+                           panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset, 
+                                   state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength, 
+                                   phys, checkOffset);
+                       }
+                       checkOffset += page_size - (phys & page_mask);
+                   }
+               }
+#endif
            }
            else
            {
@@ -1150,7 +1218,7 @@ IODMACommand::clientOutputSegment(
 
     if (target->fNumAddressBits && (target->fNumAddressBits < 64) 
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
-       && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
+       && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
     {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
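
Both init paths above now accept an IOService in place of an IOMapper: anything that fails the OSDynamicCast is treated as the DMA client device and stashed in fDevice, so the system mapper can apply that device's IODMAMapSpecification. A hedged sketch of a caller relying on this, with the factory signature and segment function assumed from IODMACommand.h:

    IODMACommand * cmd = IODMACommand::withSpecification(
        kIODMACommandOutputHost64,     // segment function
        32,                            // numAddressBits
        65536,                         // maxSegmentSize
        IODMACommand::kMapped,
        0,                             // maxTransferSize (unlimited)
        1,                             // alignment
        (IOMapper *) provider,         // device, not a mapper (see above)
        NULL);                         // refCon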
index 37c0bc7eaf8d3e5b66383f2e1e5257c2d8eaf56c..5f19ecd15dd90c3517ff388a5b6ce52ed1db1f26 100644 (file)
@@ -173,6 +173,7 @@ to restrict I/O ops.
 #include <machine/pal_hibernate.h>
 
 extern "C" addr64_t            kvtophys(vm_offset_t va);
+extern "C" ppnum_t             pmap_find_phys(pmap_t pmap, addr64_t va);
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -1109,7 +1110,7 @@ if (vars->position & (vars->blockSize - 1)) HIBLOG("misaligned file pos %qx\n",
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-               
+
 IOReturn
 IOHibernateSystemSleep(void)
 {
@@ -1727,7 +1728,7 @@ IOHibernateSystemWake(void)
 static IOReturn
 IOHibernateDone(IOHibernateVars * vars)
 {
-    hibernate_teardown(vars->page_list, vars->page_list_wired);
+    hibernate_teardown(vars->page_list, vars->page_list_wired, vars->page_list_pal);
 
     if (vars->videoMapping)
     {
@@ -1811,43 +1812,46 @@ IOHibernateDone(IOHibernateVars * vars)
     if (vars->ioBuffer)
        vars->ioBuffer->release();
     bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0]));
-    if (vars->handoffBuffer && (kIOHibernateStateWakingFromHibernate == gIOHibernateState))
+    if (vars->handoffBuffer)
     {
-       IOHibernateHandoff * handoff;
-       bool done = false;
-       for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy();
-            !done;
-            handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount])
+       if (kIOHibernateStateWakingFromHibernate == gIOHibernateState)
        {
-           HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount);
-           uint8_t * data = &handoff->data[0];
-           switch (handoff->type)
+           IOHibernateHandoff * handoff;
+           bool done = false;
+           for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy();
+                !done;
+                handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount])
            {
-               case kIOHibernateHandoffTypeEnd:
-                   done = true;
-                   break;
+               HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount);
+               uint8_t * data = &handoff->data[0];
+               switch (handoff->type)
+               {
+                   case kIOHibernateHandoffTypeEnd:
+                       done = true;
+                       break;
 
-           case kIOHibernateHandoffTypeDeviceTree:
-                   MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot());
-                   break;
-    
-               case kIOHibernateHandoffTypeKeyStore:
+                   case kIOHibernateHandoffTypeDeviceTree:
+                       MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot());
+                       break;
+       
+                   case kIOHibernateHandoffTypeKeyStore:
 #if defined(__i386__) || defined(__x86_64__)
-                   {
-                       IOBufferMemoryDescriptor *
-                       md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn);
-                       if (md)
                        {
-                           IOSetKeyStoreData(md);
+                           IOBufferMemoryDescriptor *
+                           md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn);
+                           if (md)
+                           {
+                               IOSetKeyStoreData(md);
+                           }
                        }
-                   }
 #endif
-                   break;
-    
-               default:
-                   done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000));
-                   break;
-           }    
+                       break;
+       
+                   default:
+                       done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000));
+                       break;
+               }    
+           }
        }
        vars->handoffBuffer->release();
     }
@@ -2889,3 +2893,51 @@ hibernate_machine_init(void)
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+void IOHibernateSystemRestart(void)
+{
+    static uint8_t    noteStore[32] __attribute__((aligned(32)));
+    IORegistryEntry * regEntry;
+    const OSSymbol *  sym;
+    OSData *          noteProp;
+    OSData *          data;
+    uintptr_t *       smcVars;
+    uint8_t *         smcBytes;
+    size_t            len;
+    addr64_t          element;
+
+    data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey));
+    if (!data) return;
+
+    smcVars = (typeof(smcVars)) data->getBytesNoCopy();
+    smcBytes = (typeof(smcBytes)) smcVars[1];
+    len = smcVars[0];
+    if (len > sizeof(noteStore)) len = sizeof(noteStore);
+    noteProp = OSData::withCapacity(3 * sizeof(element));
+    if (!noteProp) return;
+    element = len;
+    noteProp->appendBytes(&element, sizeof(element));
+    element = crc32(0, smcBytes, len);
+    noteProp->appendBytes(&element, sizeof(element));
+
+    bcopy(smcBytes, noteStore, len);
+    element = (addr64_t) &noteStore[0];
+    element = (element & page_mask) | ptoa_64(pmap_find_phys(kernel_pmap, element));
+    noteProp->appendBytes(&element, sizeof(element));
+
+    if (!gIOOptionsEntry)
+    {
+       regEntry = IORegistryEntry::fromPath("/options", gIODTPlane);
+       gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry);
+       if (regEntry && !gIOOptionsEntry)
+           regEntry->release();
+    }
+
+    sym = OSSymbol::withCStringNoCopy(kIOHibernateBootNoteKey);
+    if (gIOOptionsEntry && sym) gIOOptionsEntry->setProperty(sym, noteProp);
+    if (noteProp)               noteProp->release();
+    if (sym)                    sym->release();
+}
+
+
+
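
IOHibernateSystemRestart() above preserves the SMC variables for the firmware across a restart: it copies up to 32 bytes into an aligned static buffer and publishes a "boot-note" NVRAM property of three 64-bit elements. A hedged sketch of the resulting layout (the struct name is hypothetical; the fields follow the appendBytes calls above):

    struct boot_note {
        uint64_t length;     /* bytes of SMC variable data, <= 32 */
        uint64_t crc;        /* crc32(0, smcBytes, length) */
        uint64_t physAddr;   /* physical address of the noteStore copy */
    };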
index 08ecc626d33d3d950c387ff12a551edd0fb0ded7..6782ec9fe7152d173f7b2a1d0824a639728e2e61 100644 (file)
@@ -167,19 +167,20 @@ void IOInterruptEventSource::disable()
 
 void IOInterruptEventSource::setWorkLoop(IOWorkLoop *inWorkLoop)
 {
-    super::setWorkLoop(inWorkLoop);
-
-    if (!provider)
-       return;
-
-    if ( !inWorkLoop ) {
-       if (intIndex >= 0) {
-           provider->unregisterInterrupt(intIndex);
+    if (inWorkLoop) super::setWorkLoop(inWorkLoop);
+
+    if (provider) {
+       if (!inWorkLoop) {
+           if (intIndex >= 0) {
+               provider->unregisterInterrupt(intIndex);
+               intIndex = ~intIndex;
+           }
+       } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) {
            intIndex = ~intIndex;
        }
-    } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) {
-       intIndex = ~intIndex;
     }
+
+    if (!inWorkLoop) super::setWorkLoop(inWorkLoop);
 }
 
 const IOService *IOInterruptEventSource::getProvider() const
index 27c55e7c4928621842d921ece1bd38b43ab352f8..fc4f31b5f9111db497686c814c7edaf27918556f 100644 (file)
@@ -98,6 +98,9 @@ extern ppnum_t IOGetLastPageNumber(void);
 
 extern ppnum_t gIOLastPage;
 
+extern IOSimpleLock * gIOPageAllocLock;
+extern queue_head_t   gIOPageAllocList;
+
 /* Physical to physical copy (ints must be disabled) */
 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t size);
 
@@ -105,12 +108,16 @@ __END_DECLS
 
 // Used for dedicated communications for IODMACommand
 enum  {
-    kIOMDWalkSegments         = 0x00000001,
-    kIOMDFirstSegment        = 0x00000002 | kIOMDWalkSegments,
-    kIOMDGetCharacteristics   = 0x00000004,
-    kIOMDSetDMAActive         = 0x00000005,
-    kIOMDSetDMAInactive       = 0x00000006,
-    kIOMDLastDMACommandOperation
+    kIOMDWalkSegments             = 0x01000000,
+    kIOMDFirstSegment            = 1 | kIOMDWalkSegments,
+    kIOMDGetCharacteristics       = 0x02000000,
+    kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
+    kIOMDDMAActive                = 0x03000000,
+    kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
+    kIOMDSetDMAInactive           = kIOMDDMAActive,
+    kIOMDAddDMAMapSpec            = 0x04000000,
+    kIOMDDMAMap                   = 0x05000000,
+    kIOMDDMACommandOperationMask  = 0xFF000000,
 };
 struct IOMDDMACharacteristics {
     UInt64 fLength;
@@ -119,7 +126,7 @@ struct IOMDDMACharacteristics {
     UInt32 fPageAlign;
     ppnum_t fHighestPage;
     IODirection fDirection;
-    UInt8 fIsMapped, fIsPrepared;
+    UInt8 fIsPrepared;
 };
 struct IOMDDMAWalkSegmentArgs {
     UInt64 fOffset;                    // Input/Output offset
@@ -128,6 +135,15 @@ struct IOMDDMAWalkSegmentArgs {
 };
 typedef UInt8 IOMDDMAWalkSegmentState[128];
 
+struct IOMDDMAMapArgs {
+    IOMapper *            fMapper;
+    IODMAMapSpecification fMapSpec;
+    uint64_t              fOffset;
+    uint64_t              fLength;
+    uint64_t              fAlloc;
+    ppnum_t               fAllocCount;
+};
+
 struct IODMACommandInternal
 {
     IOMDDMAWalkSegmentState fState;
@@ -154,11 +170,13 @@ struct IODMACommandInternal
 
     ppnum_t  fCopyPageCount;
 
-    ppnum_t  fLocalMapperPageAlloc;
+    addr64_t  fLocalMapperPageAlloc;
     ppnum_t  fLocalMapperPageCount;
 
     class IOBufferMemoryDescriptor * fCopyMD;
 
+    IOService * fDevice;
+
     // IODMAEventSource use
     IOReturn fStatus;
     UInt64   fActualByteCount;
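
The operation codes above move to a top-byte encoding: the high byte selects the operation (mask kIOMDDMACommandOperationMask) and the low bits carry modifiers, e.g. kIOMDGetCharacteristicsMapped = kIOMDGetCharacteristics | 1. A hedged dispatch sketch in the style of dmaCommandOperation(); the body comments mark what a real implementation would do:

    IOReturn
    dmaCommandOperationSketch(DMACommandOps op, void * vData, UInt dataSize)
    {
        switch (op & kIOMDDMACommandOperationMask)
        {
            case kIOMDGetCharacteristics:
                /* (op & 1) distinguishes the "Mapped" variant */
                break;

            case kIOMDDMAMap:
            {
                if (dataSize < sizeof(IOMDDMAMapArgs)) return kIOReturnUnderrun;
                IOMDDMAMapArgs * args = (IOMDDMAMapArgs *) vData;
                /* map [fOffset, fOffset+fLength) through args->fMapper per
                   args->fMapSpec; return results in fAlloc / fAllocCount */
                (void) args;
                break;
            }

            default:
                return kIOReturnBadArgument;
        }
        return kIOReturnSuccess;
    }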
index 23b0db10e25b21e54afd1e93f5b65a83fe473d4c..6abcb5d1aef0ae53bb18efd85dfa7ab5d445cec6 100644 (file)
@@ -29,6 +29,7 @@
 #include <IOKit/IOMapper.h>
 #include <IOKit/IODMACommand.h>
 #include <libkern/c++/OSData.h>
+#include <libkern/OSDebug.h>
 
 __BEGIN_DECLS
 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
@@ -40,7 +41,7 @@ OSDefineMetaClassAndAbstractStructors(IOMapper, IOService);
 OSMetaClassDefineReservedUsed(IOMapper, 0);
 OSMetaClassDefineReservedUsed(IOMapper, 1);
 OSMetaClassDefineReservedUsed(IOMapper, 2);
-OSMetaClassDefineReservedUnused(IOMapper, 3);
+OSMetaClassDefineReservedUsed(IOMapper, 3);
 OSMetaClassDefineReservedUnused(IOMapper, 4);
 OSMetaClassDefineReservedUnused(IOMapper, 5);
 OSMetaClassDefineReservedUnused(IOMapper, 6);
@@ -132,7 +133,10 @@ void IOMapper::waitForSystemMapper()
 {
     sMapperLock.lock();
     while ((uintptr_t) IOMapper::gSystem & kWaitMask)
+    {
+               OSReportWithBacktrace("waitForSystemMapper");
         sMapperLock.sleep(&IOMapper::gSystem);
+    }
     sMapperLock.unlock();
 }
 
@@ -165,7 +169,7 @@ IOMapper * IOMapper::copyMapperForDevice(IOService * device)
 
 ppnum_t IOMapper::iovmAllocDMACommand(IODMACommand * command, IOItemCount pageCount)
 {
-       return (0);
+    return (0);
 }
 
 void IOMapper::iovmFreeDMACommand(IODMACommand * command,
@@ -173,6 +177,17 @@ void IOMapper::iovmFreeDMACommand(IODMACommand * command,
 {
 }
 
+ppnum_t IOMapper::iovmMapMemory(
+                         OSObject                    * memory,   // dma command or iomd
+                         ppnum_t                       offsetPage,
+                         ppnum_t                       pageCount,
+                         uint32_t                      options,
+                         upl_page_info_t             * pageList,
+                         const IODMAMapSpecification * mapSpecification)
+{
+    return (0);
+}
+
 void IOMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                             ppnum_t *pageList, IOItemCount pageCount)
 {
@@ -249,6 +264,7 @@ void IOMapperIOVMFree(ppnum_t addr, unsigned pages)
 ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page)
 {
     if (IOMapper::gSystem) {
+               if (!addr) panic("!addr");
         IOMapper::gSystem->iovmInsert(addr, (IOItemCount) offset, page);
         return addr + offset;
     }
index fe8f9f2713ace17a8fa1b95e6873a0fe2eb42d1c..aa24637cb4485a632b6f6a1c43da37c13e2a6909 100644 (file)
@@ -40,6 +40,7 @@
 #include <IOKit/IOLib.h>
 #include <IOKit/IOMemoryDescriptor.h>
 #include <IOKit/IOMapper.h>
+#include <IOKit/IODMACommand.h>
 #include <IOKit/IOKitKeysPrivate.h>
 
 #ifndef __LP64__
@@ -89,6 +90,8 @@ __END_DECLS
 
 #define kIOMaximumMappedIOByteCount    (512*1024*1024)
 
+#define kIOMapperWaitSystem    ((IOMapper *) 1)
+
 static IOMapper * gIOSystemMapper = NULL;
 
 static ppnum_t   gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
@@ -141,13 +144,16 @@ struct ioPLBlock {
     upl_t fIOPL;
     vm_address_t fPageInfo;   // Pointer to page list or index into it
     uint32_t fIOMDOffset;          // The offset of this iopl in descriptor
-    ppnum_t fMappedBase;           // Page number of first page in this iopl
+    ppnum_t fMappedPage;           // Page number of first page in this iopl
     unsigned int fPageOffset;      // Offset within first page of iopl
     unsigned int fFlags;           // Flags
 };
 
 struct ioGMDData {
-    IOMapper *fMapper;
+    IOMapper *  fMapper;
+    uint8_t    fDMAMapNumAddressBits;
+    uint64_t    fDMAMapAlignment;
+    addr64_t    fMappedBase;
     uint64_t fPreparationID;
     unsigned int fPageCnt;
 #if __LP64__
@@ -159,7 +165,7 @@ struct ioGMDData {
 };
 
 #define getDataP(osd)  ((ioGMDData *) (osd)->getBytesNoCopy())
-#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
+#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
 #define getNumIOPL(osd, d)     \
     (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
 #define getPageList(d) (&(d->fPageList[0]))
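
These macros index one variable-length OSData blob: an ioGMDData header, then fPageCnt upl_page_info_t entries, then the ioPLBlock array; the new (void *) cast only quiets the alignment warning on that pointer hop. A sketch of the size math, assuming computeDataSize matches this layout:

    // Layout of _memoryEntries:
    //   [ ioGMDData header | fPageList[0..fPageCnt-1] | ioPLBlock[0..N-1] ]
    static inline size_t gmdDataSize(unsigned int pages, unsigned int upls)
    {
        return offsetof(ioGMDData, fPageList)
             + pages * sizeof(upl_page_info_t)
             + upls  * sizeof(ioPLBlock);
    }
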
@@ -693,6 +699,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void *   buffers,
     }
 
     // Grab the appropriate mapper
+    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
     if (kIOMemoryMapperNone & options)
         mapper = 0;    // No Mapper
     else if (mapper == kIOMapperSystem) {
@@ -735,17 +742,8 @@ IOGeneralMemoryDescriptor::initWithOptions(void *  buffers,
         ioGMDData *dataP;
         unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
 
-        if (!_memoryEntries) {
-            _memoryEntries = OSData::withCapacity(dataSize);
-            if (!_memoryEntries)
-                return false;
-        }
-        else if (!_memoryEntries->initWithCapacity(dataSize))
-            return false;
-
-        _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+        if (!initMemoryEntries(dataSize, mapper)) return (false);
         dataP = getDataP(_memoryEntries);
-        dataP->fMapper = mapper;
         dataP->fPageCnt = 0;
 
  //       _wireCount++;        // UPLs start out life wired
@@ -761,28 +759,19 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");
 
-        // Set the flag kIOPLOnDevice conveniently equal to 1
-        iopl.fFlags  = pageList->device | kIOPLExternUPL;
-        iopl.fIOMDOffset = 0;
-
         _highestPage = upl_get_highest_page(iopl.fIOPL);
 
+        // Set the flag kIOPLOnDevice conveniently equal to 1
+        iopl.fFlags  = pageList->device | kIOPLExternUPL;
         if (!pageList->device) {
             // Pre-compute the offset into the UPL's page list
             pageList = &pageList[atop_32(offset)];
             offset &= PAGE_MASK;
-            if (mapper) {
-                iopl.fMappedBase = mapper->iovmAlloc(_pages);
-                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
-            }
-           else
-               iopl.fMappedBase = 0;
         }
-       else
-           iopl.fMappedBase = 0;
+        iopl.fIOMDOffset = 0;
+        iopl.fMappedPage = 0;
         iopl.fPageInfo = (vm_address_t) pageList;
         iopl.fPageOffset = offset;
-
         _memoryEntries->appendBytes(&iopl, sizeof(iopl));
     }
     else {
@@ -886,17 +875,8 @@ IOGeneralMemoryDescriptor::initWithOptions(void *  buffers,
             ioGMDData *dataP;
             unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
 
-            if (!_memoryEntries) {
-                _memoryEntries = OSData::withCapacity(dataSize);
-                if (!_memoryEntries)
-                   return false;
-            }
-            else if (!_memoryEntries->initWithCapacity(dataSize))
-                return false;
-    
-            _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+            if (!initMemoryEntries(dataSize, mapper)) return false;
             dataP = getDataP(_memoryEntries);
-            dataP->fMapper = mapper;
             dataP->fPageCnt = _pages;
 
            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
@@ -1146,7 +1126,8 @@ IOGeneralMemoryDescriptor::getPreparationID( void )
     if (!_wireCount)
        return (kIOPreparationIDUnprepared);
 
-    if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
+    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
+      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
     {
         IOMemoryDescriptor::setPreparationID();
         return (IOMemoryDescriptor::getPreparationID());
@@ -1195,6 +1176,81 @@ uint64_t IOMemoryDescriptor::getPreparationID( void )
 
 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
 {
+    IOReturn err = kIOReturnSuccess;
+    DMACommandOps params;
+    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+    ioGMDData *dataP;
+
+    params = (op & ~kIOMDDMACommandOperationMask & op);
+    op &= kIOMDDMACommandOperationMask;
+
+    if (kIOMDDMAMap == op)
+    {
+       if (dataSize < sizeof(IOMDDMAMapArgs))
+           return kIOReturnUnderrun;
+
+       IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+       if (!_memoryEntries 
+           && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+       if (_memoryEntries && data->fMapper)
+       {
+           bool remap = false;
+           bool whole = ((data->fOffset == 0) && (data->fLength == _length));
+           dataP = getDataP(_memoryEntries);
+           if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits)
+           {
+               dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+               remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+           }
+           if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)
+           {
+               dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+               remap |= (dataP->fDMAMapAlignment > page_size);
+           }
+           remap |= (!whole);
+           if (remap || !dataP->fMappedBase)
+           {
+//             if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
+               err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
+               if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
+               {
+                   dataP->fMappedBase = data->fAlloc;
+                   data->fAllocCount = 0;                      // IOMD owns the alloc now
+               }
+           }
+           else
+           {
+               data->fAlloc = dataP->fMappedBase;
+               data->fAllocCount = 0;                          // IOMD owns the alloc
+           }
+       }
+
+       return (err);                           
+    }
+
+    if (kIOMDAddDMAMapSpec == op)
+    {
+       if (dataSize < sizeof(IODMAMapSpecification))
+           return kIOReturnUnderrun;
+
+       IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+       if (!_memoryEntries 
+           && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+       if (_memoryEntries)
+       {
+           dataP = getDataP(_memoryEntries);
+           if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
+               dataP->fDMAMapNumAddressBits = data->numAddressBits;
+           if (data->alignment > dataP->fDMAMapAlignment)
+               dataP->fDMAMapAlignment = data->alignment;
+       }
+       return kIOReturnSuccess;
+    }
+
     if (kIOMDGetCharacteristics == op) {
 
        if (dataSize < sizeof(IOMDDMACharacteristics))
@@ -1210,35 +1266,30 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
-           if (_memoryEntries) {
-               ioGMDData *gmdData = getDataP(_memoryEntries);
-               ioPLBlock *ioplList = getIOPLList(gmdData);
-               UInt count = getNumIOPL(_memoryEntries, gmdData);
-
-               data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
-                              && ioplList[0].fMappedBase);
+           if (_memoryEntries)
+           {
+               dataP = getDataP(_memoryEntries);
+               ioPLBlock *ioplList = getIOPLList(dataP);
+               UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
-           else
-               data->fIsMapped = false;
        }
 
        return kIOReturnSuccess;
 
 #if IOMD_DEBUG_DMAACTIVE
-    } else if (kIOMDSetDMAActive == op) {
-       IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
-       OSIncrementAtomic(&md->__iomd_reservedA);
-    } else if (kIOMDSetDMAInactive == op) {
-       IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
-       if (md->__iomd_reservedA)
-           OSDecrementAtomic(&md->__iomd_reservedA);
-       else
-           panic("kIOMDSetDMAInactive");
+    } else if (kIOMDDMAActive == op) {
+       if (params) OSIncrementAtomic(&md->__iomd_reservedA);
+       else {
+           if (md->__iomd_reservedA)
+               OSDecrementAtomic(&md->__iomd_reservedA);
+           else
+               panic("kIOMDSetDMAInactive");
+       }
 #endif /* IOMD_DEBUG_DMAACTIVE */
 
-    } else if (!(kIOMDWalkSegments & op))
+    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;
 
     // Get the next segment
@@ -1257,12 +1308,32 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
     UInt offset = isP->fIO.fOffset;
     bool mapped = isP->fIO.fMapped;
 
+    if (IOMapper::gSystem && mapped
+        && (!(kIOMemoryHostOnly & _flags))
+       && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
+//     && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
+    {
+       if (!_memoryEntries 
+           && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+       dataP = getDataP(_memoryEntries);
+       if (dataP->fMapper)
+       {
+           IODMAMapSpecification mapSpec;
+           bzero(&mapSpec, sizeof(mapSpec));
+           mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+           mapSpec.alignment = dataP->fDMAMapAlignment;
+           err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
+           if (kIOReturnSuccess != err) return (err);
+       }
+    }
+
     if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
 
     // Validate the previous offset
     UInt ind, off2Ind = isP->fOffset2Index;
-    if ((kIOMDFirstSegment != op) 
+    if (!params
        && offset 
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
@@ -1271,6 +1342,8 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
 
     UInt length;
     UInt64 address;
+
+
     if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
 
        // Physical address based memory descriptor
@@ -1287,12 +1360,20 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;
 
-       // see how far we can coalesce ranges
-       while (ind < _rangesCount && address + length == physP[ind].address) {
-           len = physP[ind].length;
-           length += len;
-           off2Ind += len;
-           ind++;
+       if (true && mapped && _memoryEntries 
+               && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
+       {
+           address = dataP->fMappedBase + offset;
+       }
+       else
+       {
+           // see how far we can coalesce ranges
+           while (ind < _rangesCount && address + length == physP[ind].address) {
+               len = physP[ind].length;
+               length += len;
+               off2Ind += len;
+               ind++;
+           }
        }
 
        // correct contiguous check overshoot
@@ -1316,18 +1397,25 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;
 
-       // see how far we can coalesce ranges
-       while (ind < _rangesCount && address + length == physP[ind].address) {
-           len = physP[ind].length;
-           length += len;
-           off2Ind += len;
-           ind++;
+       if (true && mapped && _memoryEntries 
+               && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
+       {
+           address = dataP->fMappedBase + offset;
+       }
+       else
+       {
+           // see how far we can coalesce ranges
+           while (ind < _rangesCount && address + length == physP[ind].address) {
+               len = physP[ind].length;
+               length += len;
+               off2Ind += len;
+               ind++;
+           }
        }
-
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
-    }
+    } 
 #endif /* !__LP64__ */
     else do {
        if (!_wireCount)
@@ -1335,7 +1423,7 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
 
        assert(_memoryEntries);
 
-       ioGMDData * dataP = getDataP(_memoryEntries);
+       dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);
@@ -1361,9 +1449,9 @@ IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *
 
        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
-       if (mapped && ioplInfo.fMappedBase) {
+       if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
-           address = ptoa_64(ioplInfo.fMappedBase) + offset;
+           address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }
 
@@ -1480,11 +1568,11 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l
     else
     {
        IOMDDMAWalkSegmentState _state;
-       IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
+       IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
 
        state->fOffset = offset;
        state->fLength = _length - offset;
-       state->fMapped = (0 == (options & kIOMemoryMapperNone));
+       state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
 
        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
 
@@ -1516,12 +1604,6 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l
                if (length > origLen)
                    length = origLen;
            }
-#ifdef __LP64__
-           else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
-           {
-               panic("getPhysicalSegment not mapped for I/O");
-           }
-#endif /* __LP64__ */
        }
     }
 
@@ -1645,6 +1727,13 @@ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
 IOReturn 
 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
 {
+    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+    DMACommandOps params;
+    IOReturn err;
+
+    params = (op & ~kIOMDDMACommandOperationMask & op);
+    op &= kIOMDDMACommandOperationMask;
+
     if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;
@@ -1653,11 +1742,9 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = getDirection();
-       if (IOMapper::gSystem)
-           data->fIsMapped = true;
        data->fIsPrepared = true;       // Assume prepared - fails safe
     }
-    else if (kIOMDWalkSegments & op) {
+    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;
 
@@ -1665,15 +1752,25 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data
        IOByteCount offset  = (IOByteCount) data->fOffset;
 
        IOPhysicalLength length;
-       IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
        if (data->fMapped && IOMapper::gSystem)
-           data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
+           data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
-           data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+           data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
     }
-    else
-       return kIOReturnBadArgument;
+    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
+    else if (kIOMDDMAMap == op)
+    {
+       if (dataSize < sizeof(IOMDDMAMapArgs))
+           return kIOReturnUnderrun;
+       IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+       if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
+
+       err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
+       return (err);                           
+    }
+    else return kIOReturnBadArgument;
 
     return kIOReturnSuccess;
 }
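
Both implementations now split the op word before dispatching: the low bits select the operation, the bits above kIOMDDMACommandOperationMask ride along as options. The decode in isolation, assuming that bit split:

    // Sketch: one word carries the op code plus option bits.
    static void decodeOp(DMACommandOps opWord)
    {
        DMACommandOps params = opWord & ~kIOMDDMACommandOperationMask; // options
        DMACommandOps op     = opWord &  kIOMDDMACommandOperationMask; // op code
        // kIOMDDMAActive treats non-zero params as "increment" (set active),
        // zero as "decrement"; kIOMDWalkSegments uses non-zero params to
        // force a fresh walk instead of resuming from the cached offset.
        (void) params; (void) op;
    }
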
@@ -1960,8 +2057,8 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
     IOOptionBits type = _flags & kIOMemoryTypeMask;
     IOReturn error = kIOReturnCannotWire;
     ioGMDData *dataP;
+    upl_page_info_array_t pageInfo;
     ppnum_t mapBase = 0;
-    IOMapper *mapper;
     ipc_port_t sharedMem = (ipc_port_t) _memEntry;
 
     assert(!_wireCount);
@@ -1971,14 +2068,9 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
        return kIOReturnNoResources;
 
     dataP = getDataP(_memoryEntries);
+    IOMapper *mapper;
     mapper = dataP->fMapper;
-    if (mapper && _pages)
-        mapBase = mapper->iovmAlloc(_pages);
-
-    // Note that appendBytes(NULL) zeros the data up to the
-    // desired length.
-    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
-    dataP = 0; // May no longer be valid so lets not get tempted.
+    dataP->fMappedBase = 0;
 
     if (forDirection == kIODirectionNone)
         forDirection = getDirection();
@@ -2001,9 +2093,16 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
 
 #ifdef UPL_NEED_32BIT_ADDR
     if (kIODirectionPrepareToPhys32 & forDirection) 
-       uplFlags |= UPL_NEED_32BIT_ADDR;
+    {
+       if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
+       if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
+    }
 #endif
 
+    // Note that appendBytes(NULL) zeros the data up to the desired length.
+    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
+    dataP = 0;
+
     // Find the appropriate vm_map for the given task
     vm_map_t curMap;
     if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
@@ -2016,6 +2115,7 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
     unsigned int pageIndex = 0;
     IOByteCount mdOffset = 0;
     ppnum_t highestPage = 0;
+
     for (UInt range = 0; range < _rangesCount; range++) {
         ioPLBlock iopl;
        user_addr_t startPage;
@@ -2029,13 +2129,12 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
        startPage = trunc_page_64(startPage);
 
        if (mapper)
-           iopl.fMappedBase = mapBase + pageIndex;
+           iopl.fMappedPage = mapBase + pageIndex;
        else
-           iopl.fMappedBase = 0;
+           iopl.fMappedPage = 0;
 
        // Iterate over the current range, creating UPLs
         while (numBytes) {
-            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;
             vm_map_t theMap;
            if (curMap)
@@ -2047,8 +2146,9 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
            else
                theMap = NULL;
 
-            upl_page_info_array_t pageInfo = getPageList(dataP);
             int ioplFlags = uplFlags;
+           dataP = getDataP(_memoryEntries);
+           pageInfo = getPageList(dataP);
             upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
 
             vm_size_t ioplSize = round_page(numBytes);
@@ -2097,18 +2197,9 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
             if (baseInfo->device) {
                 numPageInfo = 1;
                 iopl.fFlags  = kIOPLOnDevice;
-                // Don't translate device memory at all 
-               if (mapper && mapBase) {
-                   mapper->iovmFree(mapBase, _pages);
-                   mapBase = 0;
-                   iopl.fMappedBase = 0;
-               }
             }
             else {
                 iopl.fFlags = 0;
-               if (mapper)
-                    mapper->iovmInsert(mapBase, pageIndex,
-                                       baseInfo, numPageInfo);
             }
 
             iopl.fIOMDOffset = mdOffset;
@@ -2133,6 +2224,7 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
                 }
                 goto abortExit;
             }
+           dataP = 0;
 
             // Check for a multiple iopl's in one virtual range
             pageIndex += numPageInfo;
@@ -2142,8 +2234,7 @@ IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
                 startPage += ioplSize;
                 mdOffset += ioplSize;
                 iopl.fPageOffset = 0;
-               if (mapper)
-                   iopl.fMappedBase = mapBase + pageIndex;
+               if (mapper) iopl.fMappedPage = mapBase + pageIndex;
             }
             else {
                 mdOffset += numBytes;
@@ -2170,9 +2261,6 @@ abortExit:
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
-
-        if (mapper && mapBase)
-            mapper->iovmFree(mapBase, _pages);
     }
 
     if (error == KERN_FAILURE)
@@ -2181,6 +2269,168 @@ abortExit:
     return error;
 }
 
+bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
+{
+    ioGMDData * dataP;
+    unsigned    dataSize = size;
+
+    if (!_memoryEntries) {
+       _memoryEntries = OSData::withCapacity(dataSize);
+       if (!_memoryEntries)
+           return false;
+    }
+    else if (!_memoryEntries->initWithCapacity(dataSize))
+       return false;
+
+    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+    dataP = getDataP(_memoryEntries);
+
+    if (mapper == kIOMapperWaitSystem) {
+        IOMapper::checkForSystemMapper();
+        mapper = IOMapper::gSystem;
+    }
+    dataP->fMapper               = mapper;
+    dataP->fPageCnt              = 0;
+    dataP->fMappedBase           = 0;
+    dataP->fDMAMapNumAddressBits = 64;
+    dataP->fDMAMapAlignment      = 0;
+    dataP->fPreparationID        = kIOPreparationIDUnprepared;
+
+    return (true);
+}
+
+IOReturn IOMemoryDescriptor::dmaMap(
+    IOMapper                    * mapper,
+    const IODMAMapSpecification * mapSpec,
+    uint64_t                      offset,
+    uint64_t                      length,
+    uint64_t                    * address,
+    ppnum_t                     * mapPages)
+{
+    IOMDDMAWalkSegmentState  walkState;
+    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
+    IOOptionBits             mdOp;
+    IOReturn                 ret;
+    IOPhysicalLength         segLen;
+    addr64_t                 phys, align, pageOffset;
+    ppnum_t                  base, pageIndex, pageCount;
+    uint64_t                 index;
+    uint32_t                 mapOptions = 0;
+
+    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
+
+    walkArgs->fMapped = false;
+    mdOp = kIOMDFirstSegment;
+    pageCount = 0;
+    for (index = 0; index < length; )
+    {
+       if (index && (page_mask & (index + pageOffset))) break;
+
+       walkArgs->fOffset = offset + index;
+       ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+       mdOp = kIOMDWalkSegments;
+       if (ret != kIOReturnSuccess) break;
+       phys = walkArgs->fIOVMAddr;
+       segLen = walkArgs->fLength;
+
+       align = (phys & page_mask);
+       if (!index) pageOffset = align;
+       else if (align) break;
+       pageCount += atop_64(round_page_64(align + segLen));
+       index += segLen;
+    }
+
+    if (index < length) return (kIOReturnVMError);
+
+    base = mapper->iovmMapMemory(this, offset, pageCount, 
+                                mapOptions, NULL, mapSpec);
+
+    if (!base) return (kIOReturnNoResources);
+
+    mdOp = kIOMDFirstSegment;
+    for (pageIndex = 0, index = 0; index < length; )
+    {
+       walkArgs->fOffset = offset + index;
+       ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+       mdOp = kIOMDWalkSegments;
+       if (ret != kIOReturnSuccess) break;
+       phys = walkArgs->fIOVMAddr;
+       segLen = walkArgs->fLength;
+
+       ppnum_t page = atop_64(phys);
+       ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
+       while (count--)
+       {
+           mapper->iovmInsert(base, pageIndex, page);
+           page++;
+           pageIndex++;
+       }
+       index += segLen;
+    }
+    if (pageIndex != pageCount) panic("pageIndex");
+
+    *address = ptoa_64(base) + pageOffset;
+    if (mapPages) *mapPages = pageCount;
+
+    return (kIOReturnSuccess);
+}
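
The first walk above only sizes the allocation: the first segment may start mid-page (pageOffset), every later segment must be page aligned, and the page count accumulates per segment. The same arithmetic as a standalone sketch, assuming 4K pages and hypothetical segment data:

    // Sketch of dmaMap()'s sizing pass (the kernel loop additionally
    // requires each later segment to begin on a page boundary of the
    // running offset, and bails out with kIOReturnVMError otherwise).
    #include <stdint.h>
    #define PG 4096ULL
    struct seg { uint64_t phys, len; };

    static uint64_t pagesFor(const struct seg * s, int n, uint64_t * pageOffset)
    {
        uint64_t pages = 0;
        for (int i = 0; i < n; i++) {
            uint64_t align = s[i].phys & (PG - 1);
            if (i == 0) *pageOffset = align;   // first segment sets the offset
            else if (align) return 0;          // later segments: page aligned
            pages += (align + s[i].len + PG - 1) / PG; // atop(round_page(...))
        }
        return pages;
    }
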
+
+IOReturn IOGeneralMemoryDescriptor::dmaMap(
+    IOMapper                    * mapper,
+    const IODMAMapSpecification * mapSpec,
+    uint64_t                      offset,
+    uint64_t                      length,
+    uint64_t                    * address,
+    ppnum_t                     * mapPages)
+{
+    IOReturn          err = kIOReturnSuccess;
+    ioGMDData *       dataP;
+    IOOptionBits      type = _flags & kIOMemoryTypeMask;
+
+    *address = 0;
+    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
+
+    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
+     || offset || (length != _length))
+    {
+       err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
+    }
+    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
+    {
+       const ioPLBlock * ioplList = getIOPLList(dataP);
+       upl_page_info_t * pageList;
+       uint32_t          mapOptions = 0;
+       ppnum_t           base;
+
+       IODMAMapSpecification mapSpec;
+       bzero(&mapSpec, sizeof(mapSpec));
+       mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+       mapSpec.alignment = dataP->fDMAMapAlignment;
+
+       // For external UPLs the fPageInfo field points directly to
+       // the upl's upl_page_info_t array.
+       if (ioplList->fFlags & kIOPLExternUPL)
+       {
+           pageList = (upl_page_info_t *) ioplList->fPageInfo;
+           mapOptions |= kIODMAMapPagingPath;
+       }
+       else
+           pageList = getPageList(dataP);
+
+    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
+
+       // Check for direct device non-paged memory
+       if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
+
+       base = mapper->iovmMapMemory(
+                       this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
+       *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
+       if (mapPages) *mapPages = _pages;
+    }
+
+    return (err);
+}
+
 /*
  * prepare
  *
@@ -2190,6 +2440,7 @@ abortExit:
  * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
  */
+
 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
 {
     IOReturn error    = kIOReturnSuccess;
@@ -2207,14 +2458,14 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
     }
 
     if (kIOReturnSuccess == error)
-       _wireCount++;
-
-    if (1 == _wireCount)
     {
-        if (kIOMemoryClearEncrypt & _flags)
-        {
-            performOperation(kIOMemoryClearEncrypted, 0, _length);
-        }
+       if (1 == ++_wireCount)
+       {
+           if (kIOMemoryClearEncrypt & _flags)
+           {
+               performOperation(kIOMemoryClearEncrypted, 0, _length);
+           }
+       }
     }
 
     if (_prepareLock)
@@ -2263,9 +2514,10 @@ IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
            if (__iomd_reservedA) panic("complete() while dma active");
 #endif /* IOMD_DEBUG_DMAACTIVE */
 
-           if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
-               dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
-
+           if (dataP->fMappedBase) {
+               dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
+               dataP->fMappedBase = 0;
+            }
            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
@@ -3343,6 +3595,28 @@ IOMemoryMap * IOMemoryMap::copyCompatible(
     return( newMapping );
 }
 
+IOReturn IOMemoryMap::wireRange(
+       uint32_t                options,
+        mach_vm_size_t         offset,
+        mach_vm_size_t         length)
+{
+    IOReturn kr;
+    mach_vm_address_t start = trunc_page_64(fAddress + offset);
+    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
+    
+    if (kIODirectionOutIn & options)
+    {
+       kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
+    }
+    else
+    {
+       kr = vm_map_unwire(fAddressMap, start, end, FALSE);
+    }
+
+    return (kr);
+}
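
wireRange() keys entirely off the direction bits: any kIODirectionOutIn bit wires the page-rounded range, none unwires it. A hypothetical caller:

    // Sketch: pin a mapping for I/O, then release it (map is an
    // IOMemoryMap * obtained elsewhere).
    IOReturn kr = map->wireRange(kIODirectionOutIn, 0, map->getLength());
    if (kIOReturnSuccess == kr) {
        // ... pages stay resident for the duration of the I/O ...
        map->wireRange(0, 0, map->getLength());  // options == 0 unwires
    }
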
+
+
 IOPhysicalAddress 
 #ifdef __LP64__
 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
@@ -3378,6 +3652,9 @@ void IOMemoryDescriptor::initialize( void )
     IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
     gIOLastPage = IOGetLastPageNumber();
+
+    gIOPageAllocLock = IOSimpleLockAlloc();
+    queue_init(&gIOPageAllocList);
 }
 
 void IOMemoryDescriptor::free( void )
index e086a456d25c3b726b88ce233e2b538c4d0203f9..9e45734201ad70bebf9ff52e6dab1c3ca4ae25d5 100644 (file)
@@ -174,7 +174,8 @@ enum
     kAutoWakePostWindow = 15
 };
 
-#define kLocalEvalClamshellCommand        (1 << 15)
+#define kLocalEvalClamshellCommand  (1 << 15)
+#define kIdleSleepRetryInterval     (3 * 60)
 
 enum {
     OFF_STATE           = 0,
@@ -280,6 +281,12 @@ static uint32_t         gAggressivesState = 0;
 static uint32_t         gDarkWakeFlags = kDarkWakeFlagHIDTickleNone | kDarkWakeFlagIgnoreDiskIOAlways;
 static PMStatsStruct    gPMStats;
 
+#if HIBERNATION
+static IOPMSystemSleepPolicyHandler     gSleepPolicyHandler = 0;
+static IOPMSystemSleepPolicyVariables * gSleepPolicyVars = 0;
+static void *                           gSleepPolicyTarget;
+#endif
+
 struct timeval gIOLastSleepTime;
 struct timeval gIOLastWakeTime;
 
@@ -805,6 +812,7 @@ static SYSCTL_PROC(_kern, OID_AUTO, progressmeter,
 
 static SYSCTL_INT(_debug, OID_AUTO, darkwake, CTLFLAG_RW, &gDarkWakeFlags, 0, "");
 
+static const OSSymbol * gIOPMSettingAutoWakeCalendarKey;
 static const OSSymbol * gIOPMSettingAutoWakeSecondsKey;
 static const OSSymbol * gIOPMSettingDebugWakeRelativeKey;
 static const OSSymbol * gIOPMSettingMaintenanceWakeCalendarKey;
@@ -827,6 +835,7 @@ bool IOPMrootDomain::start( IOService * nub )
     super::start(nub);
 
     gRootDomain = this;
+    gIOPMSettingAutoWakeCalendarKey = OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey);
     gIOPMSettingAutoWakeSecondsKey = OSSymbol::withCString(kIOPMSettingAutoWakeSecondsKey);
     gIOPMSettingDebugWakeRelativeKey = OSSymbol::withCString(kIOPMSettingDebugWakeRelativeKey);
     gIOPMSettingMaintenanceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingMaintenanceWakeCalendarKey);
@@ -845,7 +854,7 @@ bool IOPMrootDomain::start( IOService * nub )
             OSSymbol::withCString(kIOPMSettingSleepOnPowerButtonKey),
             gIOPMSettingAutoWakeSecondsKey,
             OSSymbol::withCString(kIOPMSettingAutoPowerSecondsKey),
-            OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey),
+            gIOPMSettingAutoWakeCalendarKey,
             OSSymbol::withCString(kIOPMSettingAutoPowerCalendarKey),
             gIOPMSettingDebugWakeRelativeKey,
             OSSymbol::withCString(kIOPMSettingDebugPowerRelativeKey),
@@ -1009,7 +1018,7 @@ bool IOPMrootDomain::start( IOService * nub )
         publishFeature("DisplayDims");
     }
     if(psIterator) {
-        psIterator->release();        
+        psIterator->release();
     }
     
     
@@ -1168,9 +1177,9 @@ IOReturn IOPMrootDomain::setProperties( OSObject * props_obj )
     OSBoolean       *b;
     OSNumber        *n;
     OSDictionary    *d;
-    OSSymbol        *type;
+    const OSSymbol  *key;
     OSObject        *obj;
-    unsigned int    i;
+    OSCollectionIterator * iter = 0;
 
     const OSSymbol *publish_simulated_battery_string    = OSSymbol::withCString("SoftwareSimulatedBatteries");
     const OSSymbol *boot_complete_string                = OSSymbol::withCString("System Boot Complete");
@@ -1192,163 +1201,178 @@ IOReturn IOPMrootDomain::setProperties( OSObject * props_obj )
     const OSSymbol *suspendPMClient_string              = OSSymbol::withCString(kPMSuspendedNotificationClients);
 #endif
     
-    if (!dict) 
+    if (!dict)
     {
         return_value = kIOReturnBadArgument;
         goto exit;
     }
-    
-    if ((b = OSDynamicCast(OSBoolean, dict->getObject(publish_simulated_battery_string))))
-    {
-        publishResource(publish_simulated_battery_string, kOSBooleanTrue);
-    }
 
-    if ((n = OSDynamicCast(OSNumber, dict->getObject(idle_seconds_string))))
+    iter = OSCollectionIterator::withCollection(dict);
+    if (!iter)
     {
-        setProperty(idle_seconds_string, n);
-        idleSeconds = n->unsigned32BitValue();
+        return_value = kIOReturnNoMemory;
+        goto exit;
     }
 
-    if (boot_complete_string && dict->getObject(boot_complete_string)) 
-    {
-        pmPowerStateQueue->submitPowerEvent( kPowerEventSystemBootCompleted );
-    }
-    
-    if( battery_warning_disabled_string && dict->getObject(battery_warning_disabled_string))
-    {
-        setProperty( battery_warning_disabled_string, dict->getObject(battery_warning_disabled_string));
-    }
-    
-    if (pmTimelineLogging_string && (d = OSDynamicCast(OSDictionary, dict->getObject(pmTimelineLogging_string))))
+    while ((key = (const OSSymbol *) iter->getNextObject()) &&
+           (obj = dict->getObject(key)))
     {
-        if (timeline && timeline->setProperties(d)) 
+        if (key->isEqualTo(publish_simulated_battery_string))
         {
-            OSDictionary *tlInfo = timeline->copyInfoDictionary();            
-            if (tlInfo) {
-                setProperty(kIOPMTimelineDictionaryKey, tlInfo);
-                tlInfo->release();
+            if (OSDynamicCast(OSBoolean, obj))
+                publishResource(key, kOSBooleanTrue);
+        }
+        else if (key->isEqualTo(idle_seconds_string))
+        {
+            if ((n = OSDynamicCast(OSNumber, obj)))
+            {
+                setProperty(key, n);
+                idleSeconds = n->unsigned32BitValue();
+            }
+        }
+        else if (key->isEqualTo(boot_complete_string))
+        {
+            pmPowerStateQueue->submitPowerEvent(kPowerEventSystemBootCompleted);
+        }
+        else if (key->isEqualTo(sys_shutdown_string))
+        {
+            if ((b = OSDynamicCast(OSBoolean, obj)))
+                pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b);
+        }
+        else if (key->isEqualTo(battery_warning_disabled_string))
+        {
+            setProperty(key, obj);
+        }
+        else if (key->isEqualTo(pmTimelineLogging_string))
+        {
+            if ((d = OSDynamicCast(OSDictionary, obj)) &&
+                timeline && timeline->setProperties(d))
+            {
+                OSDictionary *tlInfo = timeline->copyInfoDictionary();            
+                if (tlInfo) {
+                    setProperty(kIOPMTimelineDictionaryKey, tlInfo);
+                    tlInfo->release();
+                }
             }
         }
-    }
-
-    if( sys_shutdown_string && (b = OSDynamicCast(OSBoolean, dict->getObject(sys_shutdown_string)))) 
-    {
-        pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b);
-    }
-    
-    if( stall_halt_string && (b = OSDynamicCast(OSBoolean, dict->getObject(stall_halt_string))) ) 
-    {
-        setProperty(stall_halt_string, b);
-    }
-
 #if    HIBERNATION
-    if ( hibernatemode_string
-        && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatemode_string))))
-    {
-       setProperty(hibernatemode_string, n);
-    }
-    if ( hibernatefreeratio_string
-        && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatefreeratio_string))))
-    {
-        setProperty(hibernatefreeratio_string, n);
-    }
-    if ( hibernatefreetime_string
-        && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatefreetime_string))))
-    {
-        setProperty(hibernatefreetime_string, n);
-    }    
-    OSString *str;
-    if ( hibernatefile_string
-        && (str = OSDynamicCast(OSString, dict->getObject(hibernatefile_string))))
-    {
-        setProperty(hibernatefile_string, str);
-    }
-#endif
-    
-    if( sleepdisabled_string
-        && (b = OSDynamicCast(OSBoolean, dict->getObject(sleepdisabled_string))) )
-    {
-        setProperty(sleepdisabled_string, b);
-        pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b);
-    }
-    if (ondeck_sleepwake_uuid_string
-        && (obj = dict->getObject(ondeck_sleepwake_uuid_string)))
-    {
-        if(pmPowerStateQueue) {
+        else if (key->isEqualTo(hibernatemode_string) ||
+                 key->isEqualTo(hibernatefreeratio_string) ||
+                 key->isEqualTo(hibernatefreetime_string))
+        {
+            if ((n = OSDynamicCast(OSNumber, obj)))
+                setProperty(key, n);
+        }
+        else if (key->isEqualTo(hibernatefile_string))
+        {
+            OSString * str = OSDynamicCast(OSString, obj);
+            if (str) setProperty(key, str);
+        }
+#endif    
+        else if (key->isEqualTo(sleepdisabled_string))
+        {
+            if ((b = OSDynamicCast(OSBoolean, obj)))
+            {
+                setProperty(key, b);
+                pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b);
+            }
+        }
+        else if (key->isEqualTo(ondeck_sleepwake_uuid_string))
+        {
             obj->retain();
             pmPowerStateQueue->submitPowerEvent(kPowerEventQueueSleepWakeUUID, (void *)obj);
         }
-
-    }
-    
-    if (loginwindow_tracepoint_string
-        && (n = OSDynamicCast(OSNumber, dict->getObject(loginwindow_tracepoint_string)))
-        && pmTracer)
-    {
-        pmTracer->traceLoginWindowPhase( n->unsigned8BitValue() );
-    }
-
-    if ((b = OSDynamicCast(OSBoolean, dict->getObject(kIOPMDeepSleepEnabledKey))))
-    {
-        setProperty(kIOPMDeepSleepEnabledKey, b);
-    }
-    if ((n = OSDynamicCast(OSNumber, dict->getObject(kIOPMDeepSleepDelayKey))))
-    {
-        setProperty(kIOPMDeepSleepDelayKey, n);
-    }
-
+        else if (key->isEqualTo(loginwindow_tracepoint_string))
+        {
+            if (pmTracer && (n = OSDynamicCast(OSNumber, obj)))
+                pmTracer->traceLoginWindowPhase(n->unsigned8BitValue());
+        }
+        else if (key->isEqualTo(kIOPMDeepSleepEnabledKey)       ||
+                 key->isEqualTo(kIOPMDestroyFVKeyOnStandbyKey)  ||
+                 key->isEqualTo(kIOPMAutoPowerOffEnabledKey)    ||
+                 key->isEqualTo(stall_halt_string))
+        {
+            if ((b = OSDynamicCast(OSBoolean, obj)))
+                setProperty(key, b);
+        }
+        else if (key->isEqualTo(kIOPMDeepSleepDelayKey) ||
+                 key->isEqualTo(kIOPMAutoPowerOffDelayKey))
+        {
+            if ((n = OSDynamicCast(OSNumber, obj)))
+                setProperty(key, n);
+        }
 #if SUSPEND_PM_NOTIFICATIONS_DEBUG
-    if ((n = OSDynamicCast(OSNumber, dict->getObject(suspendPMClient_string))))
-    {
-        // Toggle the suspended status for pid n.
-        uint32_t pid_int = n->unsigned32BitValue();        
-        suspendPMNotificationsForPID(pid_int, !pmNotificationIsSuspended(pid_int));
-    }
+        else if (key->isEqualTo(suspendPMClient_string))
+        {
+            if ((n = OSDynamicCast(OSNumber, obj)))
+            {
+                // Toggle the suspended status for pid n.
+                uint32_t pid_int = n->unsigned32BitValue();        
+                suspendPMNotificationsForPID(pid_int, !pmNotificationIsSuspended(pid_int));
+            }
+        }
 #endif
-    
-    if ((b = OSDynamicCast(OSBoolean, dict->getObject(kIOPMDestroyFVKeyOnStandbyKey))))
-    {
-        setProperty(kIOPMDestroyFVKeyOnStandbyKey, b);
-    }
-
-    // Relay our allowed PM settings onto our registered PM clients
-    for(i = 0; i < allowedPMSettings->getCount(); i++) {
-
-        type = (OSSymbol *)allowedPMSettings->getObject(i);
-        if(!type) continue;
+        // Relay our allowed PM settings onto our registered PM clients
+        else if ((allowedPMSettings->getNextIndexOfObject(key, 0) != (unsigned int) -1))
+        {
+            if ((gIOPMSettingAutoWakeSecondsKey == key) && ((n = OSDynamicCast(OSNumber, obj))))
+            {
+                UInt32 rsecs = n->unsigned32BitValue();
+                if (!rsecs)
+                autoWakeStart = autoWakeEnd = 0;
+                else
+                {
+                AbsoluteTime deadline;
+                clock_interval_to_deadline(rsecs + kAutoWakePostWindow, kSecondScale, &deadline);
+                autoWakeEnd = AbsoluteTime_to_scalar(&deadline);
+                if (rsecs > kAutoWakePreWindow)
+                    rsecs -= kAutoWakePreWindow;
+                else
+                    rsecs = 0;
+                clock_interval_to_deadline(rsecs, kSecondScale, &deadline);
+                autoWakeStart = AbsoluteTime_to_scalar(&deadline);
+                }
+            }
 
-        obj = dict->getObject(type);
-        if(!obj) continue;
+            return_value = setPMSetting(key, obj);            
+            if (kIOReturnSuccess != return_value)
+                break;
 
-        if ((gIOPMSettingAutoWakeSecondsKey == type) && ((n = OSDynamicCast(OSNumber, obj))))
-        {
-            UInt32 rsecs = n->unsigned32BitValue();
-            if (!rsecs)
-            autoWakeStart = autoWakeEnd = 0;
-            else
+            if (gIOPMSettingDebugWakeRelativeKey == key)
             {
-            AbsoluteTime deadline;
-            clock_interval_to_deadline(rsecs + kAutoWakePostWindow, kSecondScale, &deadline);
-            autoWakeEnd = AbsoluteTime_to_scalar(&deadline);
-            if (rsecs > kAutoWakePreWindow)
-                rsecs -= kAutoWakePreWindow;
-            else
-                rsecs = 0;
-            clock_interval_to_deadline(rsecs, kSecondScale, &deadline);
-            autoWakeStart = AbsoluteTime_to_scalar(&deadline);
+                if ((n = OSDynamicCast(OSNumber, obj)) &&
+                    (_debugWakeSeconds = n->unsigned32BitValue()))
+                {
+                    OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarms);
+                }
+                else
+                {
+                    _debugWakeSeconds = 0;
+                    OSBitAndAtomic(~kIOPMAlarmBitDebugWake, &_scheduledAlarms);
+                }
+                DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
+            }
+            else if (gIOPMSettingAutoWakeCalendarKey == key)
+            {
+                OSData * data;
+                if ((data = OSDynamicCast(OSData, obj)) &&
+                    (data->getLength() == sizeof(IOPMCalendarStruct)))
+                {
+                    const IOPMCalendarStruct * cs = 
+                        (const IOPMCalendarStruct *) data->getBytesNoCopy();
+
+                    if (cs->year)
+                        OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarms);
+                    else
+                        OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarms);
+                    DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
+                }
             }
         }
-        if (gIOPMSettingDebugWakeRelativeKey == type)
+        else
         {
-            if ((n = OSDynamicCast(OSNumber, obj)))
-                _debugWakeSeconds = n->unsigned32BitValue();
-            else
-                _debugWakeSeconds = 0;
+            DLOG("setProperties(%s) not handled\n", key->getCStringNoCopy());
         }
-        
-        return_value = setPMSetting(type, obj);
-        
-        if(kIOReturnSuccess != return_value) goto exit;
     }
 
 exit:
@@ -1371,6 +1395,7 @@ exit:
 #if SUSPEND_PM_NOTIFICATIONS_DEBUG
     if(suspendPMClient_string) suspendPMClient_string->release();
 #endif
+    if (iter) iter->release();
     return return_value;
 }
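
The rewrite replaces one dictionary probe per known key with a single OSCollectionIterator pass; iterating an OSDictionary yields its keys. The pattern in isolation (sketch):

    OSCollectionIterator * it = OSCollectionIterator::withCollection(dict);
    if (it) {
        const OSSymbol * key;
        while ((key = (const OSSymbol *) it->getNextObject())) {
            OSObject * obj = dict->getObject(key);
            // type-check obj with OSDynamicCast, then dispatch on key ...
        }
        it->release();
    }
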
 
@@ -2193,6 +2218,7 @@ void IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
             
             _lastDebugWakeSeconds = _debugWakeSeconds;
             _debugWakeSeconds = 0;
+            _scheduledAlarms = 0;
 
             // And start logging the wake event here
             // TODO: Publish the wakeReason string as an integer
@@ -2577,11 +2603,23 @@ void IOPMrootDomain::tellNoChangeDown( unsigned long stateNum )
        // Sleep canceled, clear the sleep trace point.
     tracePoint(kIOPMTracePointSystemUp);
 
-    if (idleSeconds && !wrangler)
+    if (!wrangler)
     {
-        // stay awake for at least idleSeconds
-        startIdleSleepTimer(idleSeconds);
+        if (idleSeconds)
+        {
+            // stay awake for at least idleSeconds
+            startIdleSleepTimer(idleSeconds);
+        }
+    }
+    else if (sleepSlider && wranglerAsleep)
+    {
+        // Display wrangler is already asleep, so it won't trigger the next
+        // idle sleep attempt. Schedule a future idle sleep attempt, which
+        // also pushes out the next attempt.
+
+        startIdleSleepTimer( kIdleSleepRetryInterval );
     }
+    
     return tellClients( kIOMessageSystemWillNotSleep );
 }
 
@@ -3573,85 +3611,95 @@ void IOPMrootDomain::informCPUStateChange(
 // evaluateSystemSleepPolicy
 //******************************************************************************
 
+#define kIOPlatformSystemSleepPolicyKey     "IOPlatformSystemSleepPolicy"
+
+// Sleep flags
+enum {
+    kIOPMSleepFlagHibernate         = 0x00000001,
+    kIOPMSleepFlagSleepTimerEnable  = 0x00000002
+};
+
 struct IOPMSystemSleepPolicyEntry
 {
     uint32_t    factorMask;
     uint32_t    factorBits;
     uint32_t    sleepFlags;
     uint32_t    wakeEvents;
-};
+} __attribute__((packed));
 
 struct IOPMSystemSleepPolicyTable
 {
-    uint8_t     signature[4];
+    uint32_t    signature;
     uint16_t    version;
     uint16_t    entryCount;
     IOPMSystemSleepPolicyEntry  entries[];
-};
-
-enum {
-    kIOPMSleepFactorSleepTimerWake          = 0x00000001,
-    kIOPMSleepFactorLidOpen                 = 0x00000002,
-    kIOPMSleepFactorACPower                 = 0x00000004,
-    kIOPMSleepFactorLowBattery              = 0x00000008,
-    kIOPMSleepFactorDeepSleepNoDelay        = 0x00000010,
-    kIOPMSleepFactorDeepSleepDemand         = 0x00000020,
-    kIOPMSleepFactorDeepSleepDisable        = 0x00000040,
-    kIOPMSleepFactorUSBExternalDevice       = 0x00000080,
-    kIOPMSleepFactorBluetoothHIDDevice      = 0x00000100,
-    kIOPMSleepFactorExternalMediaMounted    = 0x00000200,
-    kIOPMSleepFactorDriverAssertBit5        = 0x00000400,   /* Reserved for ThunderBolt */
-    kIOPMSleepFactorDriverAssertBit6        = 0x00000800,
-    kIOPMSleepFactorDriverAssertBit7        = 0x00001000    /* Reserved for legacy I/O */
-};
-
-enum {
-    kSleepPhaseEarly, kSleepPhaseFinal
-};
+} __attribute__((packed));
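
With the packed layout, a well-formed IOPlatformSystemSleepPolicy blob is exactly one header plus entryCount entries; kIOPMSystemSleepPolicySignature is assumed to be the 'SLPT' four-character code the old byte-wise check matched. The validation, sketched as a predicate:

    // Sketch: the same length/signature checks evaluateSystemSleepPolicy()
    // performs below before trusting the table.
    static bool policyTableValid(const void * bytes, size_t len)
    {
        const IOPMSystemSleepPolicyTable * pt =
            (const IOPMSystemSleepPolicyTable *) bytes;
        if (len <= sizeof(*pt))                               return false;
        if (pt->signature != kIOPMSystemSleepPolicySignature) return false;
        if ((pt->version != 1) || (0 == pt->entryCount))      return false;
        return ((len - sizeof(*pt)) ==
                (pt->entryCount * sizeof(IOPMSystemSleepPolicyEntry)));
    }
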
 
-bool IOPMrootDomain::evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int sleepPhase )
+bool IOPMrootDomain::evaluateSystemSleepPolicy(
+    IOPMSystemSleepParameters * params, int sleepPhase )
 {
     const IOPMSystemSleepPolicyTable * pt;
     OSObject *  prop = 0;
     OSData *    policyData;
-    uint32_t    currentFactors;
-    uint32_t    deepSleepDelay = 0;
-    bool        success = false;
-
-    if (getProperty(kIOPMDeepSleepEnabledKey) != kOSBooleanTrue)
-        return false;
-
-    getSleepOption(kIOPMDeepSleepDelayKey, &deepSleepDelay);
-
-    prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey);
-    if (!prop)
-        return false;
-
-    policyData = OSDynamicCast(OSData, prop);
-    if (!policyData ||
-        (policyData->getLength() < sizeof(IOPMSystemSleepPolicyTable)))
-    {
-        goto done;
-    }
-
-    pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy();
-    if ((pt->signature[0] != 'S') ||
-        (pt->signature[1] != 'L') ||
-        (pt->signature[2] != 'P') ||
-        (pt->signature[3] != 'T') ||
-        (pt->version      != 1)   ||
-        (pt->entryCount   == 0))
+    uint64_t    currentFactors = 0;
+    uint32_t    standbyDelay   = 0;
+    uint32_t    powerOffDelay  = 0;
+    uint32_t    mismatch;
+    bool        standbyEnabled;
+    bool        powerOffEnabled;
+    bool        found = false;
+
+    // Get platform's sleep policy table
+    if (!gSleepPolicyHandler)
+    {
+        prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey);
+        if (!prop) goto done;
+    }
+
+    // Fetch additional settings
+    standbyEnabled = (getSleepOption(kIOPMDeepSleepDelayKey, &standbyDelay)
+        && (getProperty(kIOPMDeepSleepEnabledKey) == kOSBooleanTrue));
+    powerOffEnabled = (getSleepOption(kIOPMAutoPowerOffDelayKey, &powerOffDelay)
+        && (getProperty(kIOPMAutoPowerOffEnabledKey) == kOSBooleanTrue));
+    DLOG("standby %d delay %u, powerOff %d delay %u, hibernate %u\n",
+        standbyEnabled, standbyDelay, powerOffEnabled, powerOffDelay,
+        hibernateMode);
+
+    // pmset level overrides
+    if ((hibernateMode & kIOHibernateModeOn) == 0)
     {
-        goto done;
+        if (!gSleepPolicyHandler)
+        {
+            standbyEnabled  = false;
+            powerOffEnabled = false;
+        }
     }
-
-    if ((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) !=
-        (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount))
+    else if (!(hibernateMode & kIOHibernateModeSleep))
     {
-        goto done;
+        // Force hibernate (i.e. mode 25)
+        // If standby is enabled, force standby.
+        // If poweroff is enabled, force poweroff.
+        if (standbyEnabled)
+            currentFactors |= kIOPMSleepFactorStandbyForced;
+        else if (powerOffEnabled)
+            currentFactors |= kIOPMSleepFactorAutoPowerOffForced;
+        else
+            currentFactors |= kIOPMSleepFactorHibernateForced;
     }
 
-    currentFactors = 0;
+    // Current factors based on environment and assertions
+    if (sleepTimerMaintenance)
+        currentFactors |= kIOPMSleepFactorSleepTimerWake;
+    if (!clamshellClosed)
+        currentFactors |= kIOPMSleepFactorLidOpen;
+    if (acAdaptorConnected)
+        currentFactors |= kIOPMSleepFactorACPower;
+    if (lowBatteryCondition)
+        currentFactors |= kIOPMSleepFactorBatteryLow;
+    if (!standbyDelay)
+        currentFactors |= kIOPMSleepFactorStandbyNoDelay;
+    if (!standbyEnabled)
+        currentFactors |= kIOPMSleepFactorStandbyDisabled;
     if (getPMAssertionLevel(kIOPMDriverAssertionUSBExternalDeviceBit) !=
         kIOPMDriverAssertionLevelOff)
         currentFactors |= kIOPMSleepFactorUSBExternalDevice;
@@ -3661,88 +3709,145 @@ bool IOPMrootDomain::evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, i
     if (getPMAssertionLevel(kIOPMDriverAssertionExternalMediaMountedBit) !=
         kIOPMDriverAssertionLevelOff)
         currentFactors |= kIOPMSleepFactorExternalMediaMounted;
-    if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) !=    /* AssertionBit5 = Thunderbolt */
+    if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) !=
         kIOPMDriverAssertionLevelOff)
-        currentFactors |= kIOPMSleepFactorDriverAssertBit5;
-    if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit7) !=
+        currentFactors |= kIOPMSleepFactorThunderboltDevice;
+    if (_scheduledAlarms != 0)
+        currentFactors |= kIOPMSleepFactorRTCAlarmScheduled;
+    if (getPMAssertionLevel(kIOPMDriverAssertionMagicPacketWakeEnabledBit) !=
         kIOPMDriverAssertionLevelOff)
-        currentFactors |= kIOPMSleepFactorDriverAssertBit7;
-    if (0 == deepSleepDelay)
-        currentFactors |= kIOPMSleepFactorDeepSleepNoDelay;
-    if (!clamshellClosed)
-        currentFactors |= kIOPMSleepFactorLidOpen;
-    if (acAdaptorConnected)
-        currentFactors |= kIOPMSleepFactorACPower;
-    if (lowBatteryCondition)
-        currentFactors |= kIOPMSleepFactorLowBattery;
-    if (sleepTimerMaintenance)
-        currentFactors |= kIOPMSleepFactorSleepTimerWake;
+        currentFactors |= kIOPMSleepFactorMagicPacketWakeEnabled;
+    if (!powerOffEnabled)
+        currentFactors |= kIOPMSleepFactorAutoPowerOffDisabled;
+    if (desktopMode)
+        currentFactors |= kIOPMSleepFactorExternalDisplay;
 
-    // pmset overrides
-    if ((hibernateMode & kIOHibernateModeOn) == 0)
-        currentFactors |= kIOPMSleepFactorDeepSleepDisable;
-    else if ((hibernateMode & kIOHibernateModeSleep) == 0)
-        currentFactors |= kIOPMSleepFactorDeepSleepDemand;
-    
-    DLOG("Sleep policy %u entries, current factors 0x%x\n",
-        pt->entryCount, currentFactors);
+    DLOG("sleep factors 0x%llx\n", currentFactors);
+
+    // Clear the output params
+    bzero(params, sizeof(*params));
+
+    if (gSleepPolicyHandler)
+    {
+        if (!gSleepPolicyVars)
+        {
+            gSleepPolicyVars = IONew(IOPMSystemSleepPolicyVariables, 1);
+            if (!gSleepPolicyVars)
+                goto done;
+            bzero(gSleepPolicyVars, sizeof(*gSleepPolicyVars));
+        }
+        gSleepPolicyVars->signature = kIOPMSystemSleepPolicySignature;
+        gSleepPolicyVars->version   = kIOPMSystemSleepPolicyVersion;
+        if (kIOPMSleepPhase1 == sleepPhase)
+        {
+            gSleepPolicyVars->currentCapability = _currentCapability;
+            gSleepPolicyVars->highestCapability = _highestCapability;
+            gSleepPolicyVars->sleepReason   = lastSleepReason;
+            gSleepPolicyVars->hibernateMode = hibernateMode;
+            gSleepPolicyVars->standbyDelay  = standbyDelay;
+            gSleepPolicyVars->poweroffDelay = powerOffDelay;
+        }
+        gSleepPolicyVars->sleepFactors = currentFactors;
+        gSleepPolicyVars->sleepPhase   = sleepPhase;
+        gSleepPolicyVars->scheduledAlarms = _scheduledAlarms;
+        
+        if ((gSleepPolicyHandler(gSleepPolicyTarget, gSleepPolicyVars, params) !=
+             kIOReturnSuccess) || (kIOPMSleepTypeInvalid == params->sleepType) ||
+             (params->sleepType >= kIOPMSleepTypeLast) ||
+             (kIOPMSystemSleepParametersVersion != params->version))
+        {
+            MSG("sleep policy handler error\n");
+            goto done;
+        }
+
+        if ((params->sleepType >= kIOPMSleepTypeSafeSleep) &&
+            ((hibernateMode & kIOHibernateModeOn) == 0))
+        {
+            hibernateMode |= (kIOHibernateModeOn | kIOHibernateModeSleep);
+        }
+
+        DLOG("sleep params v%u, type %u, flags 0x%x, wake 0x%x, timer %u, poweroff %u\n",
+            params->version, params->sleepType, params->sleepFlags,
+            params->ecWakeEvents, params->ecWakeTimer, params->ecPoweroffTimer);
+        found = true;
+        goto done;
+    }
+
+    // Policy table is meaningless without standby enabled
+    if (!standbyEnabled)
+        goto done;
+
+    // Validate the sleep policy table
+    policyData = OSDynamicCast(OSData, prop);
+    if (!policyData || (policyData->getLength() <= sizeof(IOPMSystemSleepPolicyTable)))
+        goto done;
+
+    pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy();
+    if ((pt->signature != kIOPMSystemSleepPolicySignature) ||
+        (pt->version != 1) || (0 == pt->entryCount))
+        goto done;
+
+    if (((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) !=
+         (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount)))
+        goto done;
 
     for (uint32_t i = 0; i < pt->entryCount; i++)
     {
-        const IOPMSystemSleepPolicyEntry * policyEntry = &pt->entries[i];
+        const IOPMSystemSleepPolicyEntry * entry = &pt->entries[i];
+        mismatch = (((uint32_t)currentFactors ^ entry->factorBits) & entry->factorMask);
+
+        DLOG("mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x, mismatch 0x%08x\n",
+            entry->factorMask, entry->factorBits,
+            entry->sleepFlags, entry->wakeEvents, mismatch);
+        if (mismatch)
+            continue;
 
-        DLOG("factor mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x\n",
-            policyEntry->factorMask, policyEntry->factorBits,
-            policyEntry->sleepFlags, policyEntry->wakeEvents);
+        DLOG("^ found match\n");
+        found = true;
 
-        if ((currentFactors ^ policyEntry->factorBits) & policyEntry->factorMask)
-            continue;   // mismatch, try next
+        params->version = kIOPMSystemSleepParametersVersion;
+        params->reserved1 = 1;
+        if (entry->sleepFlags & kIOPMSleepFlagHibernate)
+            params->sleepType = kIOPMSleepTypeStandby;
+        else
+            params->sleepType = kIOPMSleepTypeNormalSleep;
 
-        if (p)
+        params->ecWakeEvents = entry->wakeEvents;
+        if (entry->sleepFlags & kIOPMSleepFlagSleepTimerEnable)
         {
-            p->version    = 1;
-            p->sleepFlags = policyEntry->sleepFlags;
-            p->sleepTimer = 0;
-            p->wakeEvents = policyEntry->wakeEvents;
-            if (p->sleepFlags & kIOPMSleepFlagSleepTimerEnable)
+            if (kIOPMSleepPhase2 == sleepPhase)
             {
-                if (kSleepPhaseFinal == sleepPhase)
+                clock_sec_t now_secs = gIOLastSleepTime.tv_sec;
+
+                if (!_standbyTimerResetSeconds ||
+                    (now_secs <= _standbyTimerResetSeconds))
+                {
+                    // Reset standby timer adjustment
+                    _standbyTimerResetSeconds = now_secs;
+                    DLOG("standby delay %u, reset %u\n",
+                        standbyDelay, (uint32_t) _standbyTimerResetSeconds);
+                }
+                else if (standbyDelay)
                 {
-                    clock_sec_t now_secs = gIOLastSleepTime.tv_sec;
+                    // Shorten the standby delay timer
+                    clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds;
+                    if (standbyDelay > elapsed)
+                        standbyDelay -= elapsed;
+                    else
+                        standbyDelay = 1; // must be > 0
 
-                    if (!_standbyTimerResetSeconds ||
-                        (now_secs <= _standbyTimerResetSeconds))
-                    {
-                        // Reset standby timer adjustment
-                        _standbyTimerResetSeconds = now_secs;
-                        DLOG("standby delay %u, reset %u\n",
-                            deepSleepDelay, (uint32_t) _standbyTimerResetSeconds);
-                    }
-                    else if (deepSleepDelay)
-                    {
-                        // Shorten the standby delay timer
-                        clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds;
-                        if (deepSleepDelay > elapsed)
-                            deepSleepDelay -= elapsed;
-                        else
-                            deepSleepDelay = 1; // must be > 0
-
-                        DLOG("standby delay %u, elapsed %u\n",
-                            deepSleepDelay, (uint32_t) elapsed);
-                    }
+                    DLOG("standby delay %u, elapsed %u\n",
+                        standbyDelay, (uint32_t) elapsed);
                 }
-                p->sleepTimer = deepSleepDelay;
             }
-            else if (kSleepPhaseFinal == sleepPhase)
-            {
-                // A sleep that does not enable the sleep timer will reset
-                // the standby delay adjustment.
-                _standbyTimerResetSeconds = 0;
-            }            
+            params->ecWakeTimer = standbyDelay;
+        }
+        else if (kIOPMSleepPhase2 == sleepPhase)
+        {
+            // A sleep that does not enable the sleep timer will reset
+            // the standby delay adjustment.
+            _standbyTimerResetSeconds = 0;
         }
-
-        DLOG("matched policy entry %u\n", i);
-        success = true;
         break;
     }
 
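
A user-space model of the first-match loop above: an entry applies when its masked
factor bits agree with the current factors, i.e. ((currentFactors ^ factorBits) &
factorMask) == 0, and the loop breaks on the first such entry. The struct is a
simplified stand-in for IOPMSystemSleepPolicyEntry:

    #include <cstdint>
    #include <cstddef>

    struct PolicyEntry { uint32_t factorMask, factorBits, sleepFlags, wakeEvents; };

    // Return the first entry whose masked factor bits match, else nullptr.
    static const PolicyEntry *
    matchPolicy(const PolicyEntry *entries, size_t count, uint32_t currentFactors)
    {
        for (size_t i = 0; i < count; i++) {
            uint32_t mismatch =
                (currentFactors ^ entries[i].factorBits) & entries[i].factorMask;
            if (mismatch == 0)
                return &entries[i];   // masked bits agree: entry applies
        }
        return nullptr;               // no match: policy evaluation fails
    }
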
@@ -3750,14 +3855,14 @@ done:
     if (prop)
         prop->release();
 
-    return success;
+    return found;
 }
 
+static IOPMSystemSleepParameters gEarlySystemSleepParams;
+
 void IOPMrootDomain::evaluateSystemSleepPolicyEarly( void )
 {
-    IOPMSystemSleepParameters   params;
-
-    // Evaluate sleep policy before driver sleep phase.
+    // Evaluate early (priority interest phase), before drivers sleep.
 
     DLOG("%s\n", __FUNCTION__);
     removeProperty(kIOPMSystemSleepParametersKey);
@@ -3770,12 +3875,37 @@ void IOPMrootDomain::evaluateSystemSleepPolicyEarly( void )
     hibernateMode = 0;
     getSleepOption(kIOHibernateModeKey, &hibernateMode);
 
-    if (!hibernateNoDefeat &&
-        evaluateSystemSleepPolicy(&params, kSleepPhaseEarly) &&
-        ((params.sleepFlags & kIOPMSleepFlagHibernate) == 0))
+    // Save for late evaluation if sleep is aborted
+    bzero(&gEarlySystemSleepParams, sizeof(gEarlySystemSleepParams));
+
+    if (evaluateSystemSleepPolicy(&gEarlySystemSleepParams, kIOPMSleepPhase1))
     {
-        hibernateDisabled = true;
+        if (!hibernateNoDefeat &&
+            (gEarlySystemSleepParams.sleepType == kIOPMSleepTypeNormalSleep))
+        {
+            // Disable hibernate setup for normal sleep
+            hibernateDisabled = true;
+        }
     }
+
+    // Publish IOPMSystemSleepType
+    uint32_t sleepType = gEarlySystemSleepParams.sleepType;
+    if (sleepType == kIOPMSleepTypeInvalid)
+    {
+        // no sleep policy
+        sleepType = kIOPMSleepTypeNormalSleep;
+        if (hibernateMode & kIOHibernateModeOn)
+            sleepType = (hibernateMode & kIOHibernateModeSleep) ?
+                        kIOPMSleepTypeSafeSleep : kIOPMSleepTypeHibernate;
+    }
+    else if ((sleepType == kIOPMSleepTypeStandby) &&
+             (gEarlySystemSleepParams.ecPoweroffTimer))
+    {
+        // report the lowest possible sleep state
+        sleepType = kIOPMSleepTypePowerOff;
+    }
+
+    setProperty(kIOPMSystemSleepTypeKey, sleepType, 32);
 }
 
 void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void )
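
When no policy verdict exists, the fallback in the hunk above derives the published
sleep type from hibernateMode alone. A sketch of just that derivation, with
stand-in constants in place of the kIOPMSleepType* / kIOHibernateMode* values:

    #include <cstdint>

    enum SleepType { kNormalSleep, kSafeSleep, kHibernate };
    enum : uint32_t { kModeOn = 1, kModeSleep = 2 };

    static SleepType defaultSleepType(uint32_t hibernateMode)
    {
        if ((hibernateMode & kModeOn) == 0)
            return kNormalSleep;                  // hibernation disabled
        return (hibernateMode & kModeSleep)
                   ? kSafeSleep                   // image written, RAM powered
                   : kHibernate;                  // image only, RAM off
    }
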
@@ -3783,27 +3913,30 @@ void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void )
     IOPMSystemSleepParameters   params;
     OSData *                    paramsData;
 
-    // Evaluate sleep policy after drivers but before platform sleep.
+    // Evaluate sleep policy after sleeping drivers but before platform sleep.
 
     DLOG("%s\n", __FUNCTION__);
 
-    if (evaluateSystemSleepPolicy(&params, kSleepPhaseFinal))
+    if (evaluateSystemSleepPolicy(&params, kIOPMSleepPhase2))
     {
         if ((hibernateDisabled || hibernateAborted) &&
-            (params.sleepFlags & kIOPMSleepFlagHibernate))
+            (params.sleepType != kIOPMSleepTypeNormalSleep))
         {
-            // Should hibernate but unable to or aborted.
-            // Arm timer for a short sleep and retry or wake fully.
+            // Final evaluation picked a state requiring hibernation,
+            // but hibernate setup was skipped. Retry using the early
+            // sleep parameters.
 
-            params.sleepFlags &= ~kIOPMSleepFlagHibernate;
-            params.sleepFlags |= kIOPMSleepFlagSleepTimerEnable;
-            params.sleepTimer = 1;
+            bcopy(&gEarlySystemSleepParams, &params, sizeof(params));
+            params.sleepType = kIOPMSleepTypeAbortedSleep;
+            params.ecWakeTimer = 1;
             hibernateNoDefeat = true;
             DLOG("wake in %u secs for hibernateDisabled %d, hibernateAborted %d\n",
-                        params.sleepTimer, hibernateDisabled, hibernateAborted);
+                params.ecWakeTimer, hibernateDisabled, hibernateAborted);
         }
         else
+        {
             hibernateNoDefeat = false;
+        }
 
         paramsData = OSData::withBytes(&params, sizeof(params));
         if (paramsData)
@@ -3812,25 +3945,30 @@ void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void )
             paramsData->release();
         }
 
-        if (params.sleepFlags & kIOPMSleepFlagHibernate)
+        if (params.sleepType >= kIOPMSleepTypeHibernate)
         {
-            // Force hibernate
+            // Disable safe sleep to force the hibernate path
             gIOHibernateMode &= ~kIOHibernateModeSleep;
         }
     }
 }
 
 bool IOPMrootDomain::getHibernateSettings(
-    uint32_t *  hibernateMode,
+    uint32_t *  hibernateModePtr,
     uint32_t *  hibernateFreeRatio,
     uint32_t *  hibernateFreeTime )
 {
-    bool ok = getSleepOption(kIOHibernateModeKey, hibernateMode);
+    // Called by IOHibernateSystemSleep() after evaluateSystemSleepPolicyEarly()
+    // has updated the hibernateDisabled flag.
+
+    bool ok = getSleepOption(kIOHibernateModeKey, hibernateModePtr);
     getSleepOption(kIOHibernateFreeRatioKey, hibernateFreeRatio);
     getSleepOption(kIOHibernateFreeTimeKey, hibernateFreeTime);
     if (hibernateDisabled)
-        *hibernateMode = 0;
-    DLOG("hibernateMode 0x%x\n", *hibernateMode);
+        *hibernateModePtr = 0;
+    else if (gSleepPolicyHandler)
+        *hibernateModePtr = hibernateMode;
+    DLOG("hibernateMode 0x%x\n", *hibernateModePtr);
     return ok;
 }
 
@@ -3953,6 +4091,7 @@ void IOPMrootDomain::handlePlatformHaltRestart( UInt32 pe_type )
                        ctx.PowerState  = ON_STATE;
                        ctx.MessageType = kIOMessageSystemPagingOff;
                        IOService::updateConsoleUsers(NULL, kIOMessageSystemPagingOff);
+                       IOHibernateSystemRestart();
                        break;
 
                default:
@@ -4642,6 +4781,10 @@ void IOPMrootDomain::overridePowerChangeForUIService(
                 else
                     maxPowerState = 1;
             }
+            else if (actions->parameter & kPMActionsFlagIsGraphicsDevice)
+            {
+                maxPowerState = 1;
+            }
         }
         else
         {
@@ -4964,13 +5107,17 @@ IOReturn IOPMrootDomain::setMaintenanceWakeCalendar(
 
     if (kPMCalendarTypeMaintenance == calendar->selector) {
         ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey, data);
+        if (kIOReturnSuccess == ret)
+            OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarms);
     } else 
     if (kPMCalendarTypeSleepService == calendar->selector)
     {
         ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey, data);
+        if (kIOReturnSuccess == ret)
+            OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarms);
     }
+    DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
     
-
     data->release();
     return ret;
 }
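
The two OSBitOrAtomic calls above accumulate alarm bits lock-free;
evaluateSystemSleepPolicy later maps a nonzero _scheduledAlarms to
kIOPMSleepFactorRTCAlarmScheduled. A C++ model of the same accumulation,
with std::atomic standing in for the kernel primitive:

    #include <atomic>
    #include <cstdint>

    enum : uint32_t { kMaintenanceWakeBit = 1u << 0,
                      kSleepServiceWakeBit = 1u << 1 };

    static std::atomic<uint32_t> gScheduledAlarms{0};

    // Record a scheduled alarm; racing callers cannot lose each other's bits.
    static void noteAlarmScheduled(uint32_t bit)
    {
        gScheduledAlarms.fetch_or(bit, std::memory_order_relaxed);
    }
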
@@ -5047,7 +5194,7 @@ IOReturn IOPMrootDomain::displayWranglerNotification(
 //******************************************************************************
 
 bool IOPMrootDomain::displayWranglerMatchPublished( 
-    void * target, 
+    void * target,
     void * refCon,
     IOService * newService,
     IONotifier * notifier __unused)
@@ -5323,6 +5470,16 @@ void IOPMrootDomain::dispatchPowerEvent(
             {
                 systemBooting = false;
 
+                if (lowBatteryCondition)
+                {
+                    privateSleepSystem (kIOPMSleepReasonLowPower);
+
+                    // The rest is unnecessary since the system is expected
+                    // to sleep immediately. The following wake will update
+                    // everything.
+                    break;
+                }
+
                 // If lid is closed, re-send lid closed notification
                 // now that booting is complete.
                 if ( clamshellClosed )
@@ -6367,6 +6524,20 @@ IOReturn IOPMrootDomain::callPlatformFunction(
 
         return kIOReturnSuccess;
     }
+#if HIBERNATION
+    else if (functionName &&
+             functionName->isEqualTo(kIOPMInstallSystemSleepPolicyHandlerKey))
+    {
+        if (gSleepPolicyHandler)
+            return kIOReturnExclusiveAccess;
+        if (!param1)
+            return kIOReturnBadArgument;
+        gSleepPolicyHandler = (IOPMSystemSleepPolicyHandler) param1;
+        gSleepPolicyTarget  = (void *) param2;
+        setProperty("IOPMSystemSleepPolicyHandler", kOSBooleanTrue);
+        return kIOReturnSuccess;
+    }
+#endif
 
     return super::callPlatformFunction(
         functionName, waitForFunction, param1, param2, param3, param4);
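
A sketch of the registration contract implemented above: exactly one sleep policy
handler may be installed, the first caller wins, and param1/param2 carry the
handler and its target. Types and return codes below are simplified stand-ins:

    // Toy model of kIOPMInstallSystemSleepPolicyHandlerKey registration.
    typedef int (*SleepPolicyHandler)(void *target, void *vars, void *params);

    static SleepPolicyHandler gHandler = nullptr;
    static void *gHandlerTarget = nullptr;

    static int installSleepPolicyHandler(SleepPolicyHandler handler, void *target)
    {
        if (gHandler) return -1;    // kIOReturnExclusiveAccess: already installed
        if (!handler) return -2;    // kIOReturnBadArgument
        gHandler = handler;
        gHandlerTarget = target;
        return 0;                   // kIOReturnSuccess
    }
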
index 03779d4b104d050e8f315f8da7cf026bd0b6e8c4..e7f39347403068941d3adb6b380082353ba7f806 100644 (file)
@@ -105,7 +105,9 @@ bool IOPlatformExpert::start( IOService * provider )
     // Override the mapper present flag if requested by boot arguments.
     if (PE_parse_boot_argn("dart", &debugFlags, sizeof (debugFlags)) && (debugFlags == 0))
       removeProperty(kIOPlatformMapperPresentKey);
-    
+    if (PE_parse_boot_argn("-x", &debugFlags, sizeof (debugFlags)))
+      removeProperty(kIOPlatformMapperPresentKey);
+
     // Register the presence (or lack thereof) of a system
     // PCI address mapper with the IOMapper class.
     IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey));
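
The change above makes a bare -x (safe boot) disable the DART mapper just like
dart=0. A toy argv-based model of that policy decision (PE_parse_boot_argn itself
scans the kernel boot-args string, not argv):

    #include <cstring>

    // Return whether a platform PCI address mapper should be advertised.
    static bool mapperPresent(int argc, char **argv)
    {
        for (int i = 0; i < argc; i++) {
            if (strcmp(argv[i], "dart=0") == 0) return false; // explicit override
            if (strcmp(argv[i], "-x") == 0)     return false; // safe boot
        }
        return true;
    }
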
index 0c5dbc36cd5248fd0187a285f62917265be577cd..0eabf83f9d70ebd488ce9ff04b5d4ebd2964723c 100644 (file)
@@ -465,6 +465,8 @@ bool IOService::attach( IOService * provider )
        ok = attachToParent( getRegistryRoot(), gIOServicePlane);
     }
 
+    if (ok && !__provider) (void) getProvider();
+
     return( ok );
 }
 
@@ -767,10 +769,9 @@ IOService * IOService::getProvider( void ) const
     IOService *        parent;
     SInt32     generation;
 
-    parent = __provider;
     generation = getGenerationCount();
     if( __providerGeneration == generation)
-       return( parent );
+       return( __provider );
 
     parent = (IOService *) getParentEntry( gIOServicePlane);
     if( parent == IORegistryEntry::getRegistryRoot())
@@ -778,7 +779,8 @@ IOService * IOService::getProvider( void ) const
        parent = 0;
 
     self->__provider = parent;
-    // save the count before getParentEntry()
+    OSMemoryBarrier();
+    // save the count from before call to getParentEntry()
     self->__providerGeneration = generation;
 
     return( parent );
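
The OSMemoryBarrier added above orders the two stores: publish __provider first,
then the generation count that validates it, so a reader that observes a current
generation also observes the matching provider. A C++11 release/acquire model of
the same publish pattern (the kernel uses a full barrier instead):

    #include <atomic>

    struct ProviderCache {
        void             *provider = nullptr;
        std::atomic<int>  generation{-1};

        void publish(void *p, int gen)
        {
            provider = p;                                      // store value...
            generation.store(gen, std::memory_order_release);  // ...then validator
        }

        void *lookup(int currentGen)
        {
            if (generation.load(std::memory_order_acquire) == currentGen)
                return provider;   // generation current: provider is coherent
            return nullptr;        // stale: caller must recompute
        }
    };
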
index 009383888a7767008dee56b137c1ba0df6e56540..8bd5fa52202a5f3d95536af1e601e36c142547d6 100644 (file)
@@ -46,8 +46,6 @@
 #include <libkern/c++/OSLib.h>
 #include <libkern/OSAtomic.h>
 
-#include <IOKit/pwr_mgt/RootDomain.h>
-#include <IOKit/IOMessage.h>
 #include <IOKit/IOLib.h>
 
 __BEGIN_DECLS
index 0228f1e00eeb4f78295613494cb2098ba9d9548f..87c3c331d3fda3079c66c1d21d4eb5f171a867e9 100644 (file)
@@ -7,6 +7,7 @@ OPTIONS/config_dtrace                                   optional config_dtrace
 OPTIONS/hibernation                                    optional hibernation
 OPTIONS/networking                                     optional networking
 OPTIONS/crypto                                         optional crypto
+OPTIONS/allcrypto                                      optional allcrypto
 OPTIONS/zlib                                           optional zlib
 
 # libkern
index f13561244dde4d6450f29914cc3f6b3faa61a6f3..4106c92831deea8b2f6b514224094ca3fe7320c8 100644 (file)
@@ -68,6 +68,8 @@
 extern void    acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
 extern void acpi_wake_prot(void);
 #endif
+extern kern_return_t IOCPURunPlatformQuiesceActions(void);
+extern kern_return_t IOCPURunPlatformActiveActions(void);
 
 extern void    fpinit(void);
 
@@ -92,7 +94,6 @@ typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
 unsigned int           save_kdebug_enable = 0;
 static uint64_t                acpi_sleep_abstime;
 
-
 #if CONFIG_SLEEP
 static void
 acpi_hibernate(void *refcon)
@@ -139,6 +140,8 @@ acpi_hibernate(void *refcon)
        }
        kdebug_enable = 0;
 
+       IOCPURunPlatformQuiesceActions();
+
        acpi_sleep_abstime = mach_absolute_time();
 
        (data->func)(data->refcon);
@@ -304,6 +307,8 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
 
        kdebug_enable = save_kdebug_enable;
 
+       IOCPURunPlatformActiveActions();
+
        if (did_hibernate) {
                
                my_tsc = (now >> 32) | (now << 32);
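
The two calls added above bracket the transition: quiesce actions are the last
platform code to run before sleep or hibernate, active actions the first after
wake. A toy model of that ordering contract, with made-up callback names:

    typedef void (*PlatformAction)(void);

    // Model: quiesce -> sleep -> (wake) -> active, mirroring acpi_sleep_kernel.
    static void sleepTransitionModel(PlatformAction quiesce,
                                     PlatformAction platformSleep,
                                     PlatformAction active)
    {
        quiesce();        // IOCPURunPlatformQuiesceActions()
        platformSleep();  // firmware sleep; execution resumes here on wake
        active();         // IOCPURunPlatformActiveActions()
    }
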
index 35f9224f0f60b9c586bdcad026858c87a81095ea..ae246fb859e69bed8e14507fb2db68aa7f2d9184 100644 (file)
@@ -306,6 +306,7 @@ ipc_object_alloc_dead_name(
  *             Allocate an object.
  *     Conditions:
  *             Nothing locked.  If successful, the object is returned locked.
+ *             The space is write locked on successful return. 
  *             The caller doesn't get a reference for the object.
  *     Returns:
  *             KERN_SUCCESS            The object is allocated.
@@ -363,7 +364,6 @@ ipc_object_alloc(
        ipc_entry_modified(space, *namep, entry);
 
        io_lock(object);
-       is_write_unlock(space);
 
        object->io_references = 1; /* for entry, not caller */
        object->io_bits = io_makebits(TRUE, otype, 0);
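
With the is_write_unlock removed above, ipc_object_alloc now returns with the
space still write-locked as well as the object locked; callers finish
initialization before dropping the space lock, so a half-built object is never
visible to lookups. A toy model of the revised contract (std::mutex stands in
for the space's write lock):

    #include <mutex>

    struct IpcSpace { std::mutex writeLock; };

    // Allocator: returns with the "space" still write-locked.
    static int *objectAllocLocked(IpcSpace &space)
    {
        space.writeLock.lock();
        return new int(0);          // stand-in for the new port/pset object
    }

    // Caller (ipc_port_alloc-style): init fully, then unlock the space.
    static int *portAlloc(IpcSpace &space)
    {
        int *obj = objectAllocLocked(space);
        *obj = 42;                  // ipc_port_init-style setup, still invisible
        space.writeLock.unlock();   // matches the unlock moved into the callers
        return obj;
    }
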
index d76463f64e07b24995af9c353dd54fa67250ac00..9ba62e5a9266fd85230fca6abdeaa2f255a6c9fa 100644 (file)
@@ -593,14 +593,16 @@ ipc_port_alloc(
        if (kr != KERN_SUCCESS)
                return kr;
 
-       /* port is locked */
-
+       /* port and space are locked */
        ipc_port_init(port, space, name);
 
 #if     MACH_ASSERT
        ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
 #endif  /* MACH_ASSERT */
 
+       /* unlock space after init */
+       is_write_unlock(space);
+
 #if CONFIG_MACF_MACH
        task_t issuer = current_task();
        tasklabel_lock2 (issuer, space->is_task);
index e2e6a65987c24f2dc13efbf5a226f0dafcb47293..b5d5094724cfc3401d20e7ec280711d3348f4913 100644 (file)
@@ -107,10 +107,11 @@ ipc_pset_alloc(
                              &name, (ipc_object_t *) &pset);
        if (kr != KERN_SUCCESS)
                return kr;
-       /* pset is locked */
+       /* pset and space are locked */
 
        pset->ips_local_name = name;
        ipc_mqueue_init(&pset->ips_messages, TRUE /* set */);
+       is_write_unlock(space);
 
        *namep = name;
        *psetp = pset;
index 87b7329d1a6a32e7a0337756d1ca0ee3fb8f3134..615d61f99c70294795661bfc64182bb3b19aac65 100644 (file)
@@ -249,7 +249,7 @@ mach_port_space_info(
                }
 
                iin->iin_urefs = IE_BITS_UREFS(bits);
-               iin->iin_object = (natural_t)(uintptr_t)entry->ie_object;
+               iin->iin_object = (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object);
                iin->iin_next = entry->ie_next;
                iin->iin_hash = entry->ie_index;
        }
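
VM_KERNEL_ADDRPERM hides real kernel addresses from this debug interface by
sliding nonzero pointers by a boot-time random constant, which keeps equality
comparisons between exported values meaningful without revealing the true
address. A sketch of that scheme, with a made-up constant:

    #include <cstdint>

    static const uintptr_t kAddrPerm = 0x00a5a000;  // boot-time random in xnu

    static uintptr_t addrPerm(uintptr_t addr)
    {
        return addr ? addr + kAddrPerm : 0;   // preserve NULL, obscure the rest
    }
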
index 18c7648928d7eadb67faee3bbe15defedfd3d599..a2d4f0d7aaf67652c792c6e9361efdfb831705b0 100644 (file)
@@ -1190,11 +1190,10 @@ mach_port_get_set_status(
 
                for (index = 0; index < tsize; index++) {
                        ipc_entry_t ientry = &table[index];
+                       ipc_port_t port = (ipc_port_t) ientry->ie_object;
 
-                       if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
-                               ipc_port_t port =
-                                       (ipc_port_t) ientry->ie_object;
-
+                       if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE &&
+                           port->ip_pset_count > 0) {
                                mach_port_gst_helper(pset, port,
                                                     maxnames, names, &actual);
                        }
index 018ea6b78719039e2848d5bc014c6e8b2ae06838..c88c0fd1e3c414ee2356a54f804cd5fb568b6a6e 100644 (file)
@@ -107,7 +107,8 @@ hibernate_setup(IOHibernateImageHeader * header,
 
 kern_return_t 
 hibernate_teardown(hibernate_page_list_t * page_list,
-                    hibernate_page_list_t * page_list_wired)
+                    hibernate_page_list_t * page_list_wired,
+                   hibernate_page_list_t * page_list_pal)
 {
     hibernate_free_gobble_pages();
 
@@ -115,6 +116,8 @@ hibernate_teardown(hibernate_page_list_t * page_list,
         kfree(page_list, page_list->list_size);
     if (page_list_wired)
         kfree(page_list_wired, page_list_wired->list_size);
+    if (page_list_pal)
+        kfree(page_list_pal, page_list_pal->list_size);
 
     return (KERN_SUCCESS);
 }
index 789a5b223eabb9ceaffc98db3410ae7ab52e0660..6746929e04334aad2b4f5300cd5e0bedbca3517b 100644 (file)
@@ -34,6 +34,8 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/mach_types.h>
 
+#include <libkern/version.h>
+
 /****************************************************************************
  * The four main object types
  *
@@ -343,6 +345,14 @@ typedef struct pmc_methods {
  * KERN_RESOURCE_SHORTAGE if the kernel lacks the resources to register another performance monitor
  * driver, KERN_INVALID_ARGUMENT if one or both of the arguments is null
  */
+
+/* Prevent older AppleProfileFamily kexts from loading on newer kernels.
+ * Alas, C has no cleaner way to do the version number concatenation.
+ */
+#define PERF_REG_NAME1(a, b) a ## b
+#define PERF_REG_NAME(a, b) PERF_REG_NAME1(a, b)
+#define perf_monitor_register PERF_REG_NAME(perf_monitor_register_, VERSION_MAJOR)
+
 kern_return_t perf_monitor_register(perf_monitor_object_t monitor, perf_monitor_methods_t *methods);
 
 /*!fn
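
The macros above splice the kernel's major version into the registration symbol,
so a kext built against a different major kernel fails to link rather than
loading with a mismatched ABI. For example, with VERSION_MAJOR defined as 12
(assumed here; it normally comes from <libkern/version.h>), the expansion is:

    #define PERF_REG_NAME1(a, b) a ## b
    #define PERF_REG_NAME(a, b)  PERF_REG_NAME1(a, b)
    #define VERSION_MAJOR 12
    #define perf_monitor_register PERF_REG_NAME(perf_monitor_register_, VERSION_MAJOR)

    // Any use of perf_monitor_register now references the versioned symbol:
    int perf_monitor_register_12(void *monitor, void *methods);
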
index 6dc7767f59b4f254c4c80d8311b7d9893c869d68..f4c6f17ef3394218289fd4abfd64a18c1550f869 100644 (file)
@@ -121,7 +121,8 @@ int vm_object_pagein_throttle = 16;
  * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
  */
 
-boolean_t thread_is_io_throttled(void);
+extern boolean_t thread_is_io_throttled(void);
+extern void throttle_lowpri_io(int);
 
 uint64_t vm_hard_throttle_threshold;
 
@@ -1015,6 +1016,7 @@ vm_fault_page(
                                        *top_page = first_m;
                                        if (type_of_fault)
                                                *type_of_fault = DBG_GUARD_FAULT;
+                                       thread_interrupt_level(interruptible_state);
                                        return VM_FAULT_SUCCESS;
                                } else {
                                        /*
@@ -4042,6 +4044,8 @@ handle_copy_delay:
 done:
        thread_interrupt_level(interruptible_state);
 
+       throttle_lowpri_io(TRUE);
+
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                              (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
                              (int)((uint64_t)vaddr >> 32),
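
Two exit-path fixes appear above: the guard-fault return now restores the saved
interrupt level (previously leaked on that early return), and the common done:
path throttles low-priority I/O once per fault. A control-flow sketch with
hypothetical helper names standing in for the kernel routines:

    // Hypothetical stand-ins for thread_interrupt_level / throttle_lowpri_io.
    static void restoreInterruptLevel(int level) { (void)level; }
    static void throttleLowpriIO(int sleepAllowed) { (void)sleepAllowed; }

    static int faultModel(bool guardFault, int savedLevel)
    {
        if (guardFault) {
            restoreInterruptLevel(savedLevel);  // fix: was skipped on this path
            return 0;                           // VM_FAULT_SUCCESS
        }
        /* ... normal fault handling ... */
        restoreInterruptLevel(savedLevel);
        throttleLowpriIO(1);                    // added on the common done: path
        return 0;
    }
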
index 178a1cae06ada1f14cf517828e64c24fcae32d16..0dbde894112c04e3799a58d662f43b95038a45f0 100644 (file)
@@ -8758,12 +8758,11 @@ submap_recurse:
                        if (!mapped_needs_copy) {
                                if (vm_map_lock_read_to_write(map)) {
                                        vm_map_lock_read(map);
-                                       /* XXX FBDP: entry still valid ? */
-                                       if(*real_map == entry->object.sub_map)
-                                               *real_map = map;
+                                       *real_map = map;
                                        goto RetryLookup;
                                }
                                vm_map_lock_read(entry->object.sub_map);
+                               *var_map = entry->object.sub_map;
                                cow_sub_map_parent = map;
                                /* reset base to map before cow object */
                                /* this is the map which will accept   */
@@ -8774,12 +8773,14 @@ submap_recurse:
                                mapped_needs_copy = TRUE;
                        } else {
                                vm_map_lock_read(entry->object.sub_map);
+                               *var_map = entry->object.sub_map;
                                if((cow_sub_map_parent != map) &&
                                   (*real_map != map))
                                        vm_map_unlock(map);
                        }
                } else {
                        vm_map_lock_read(entry->object.sub_map);
+                       *var_map = entry->object.sub_map;       
                        /* leave map locked if it is a target */
                        /* cow sub_map above otherwise, just  */
                        /* follow the maps down to the object */
@@ -8789,8 +8790,7 @@ submap_recurse:
                                vm_map_unlock_read(map);
                }
 
-               /* XXX FBDP: map has been unlocked, what protects "entry" !? */
-               *var_map = map = entry->object.sub_map;
+               map = *var_map;
 
                /* calculate the offset in the submap for vaddr */
                local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
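
The repeated *var_map assignments above replace a single assignment that ran after
the parent map could already be unlocked (the removed XXX FBDP comment flagged
exactly that). A toy locking model of the corrected order: read the submap pointer
while the parent still protects the entry, lock the submap, publish it, and only
then let the parent go:

    #include <mutex>

    struct VmMap { std::mutex lock; VmMap *submap = nullptr; };

    static VmMap *descendToSubmap(VmMap *parent, VmMap **varMap)
    {
        VmMap *sub = parent->submap;   // entry->object.sub_map, parent still held
        sub->lock.lock();              // vm_map_lock_read(sub_map)
        *varMap = sub;                 // publish before any unlock of the parent
        return sub;
    }
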
index 1cbe6926de2edbe2ac19fcdd7bbdc747e1d6a3bd..aeab64231d09fbe437912659db5674ba0d55674b 100644 (file)
@@ -4788,7 +4788,7 @@ static unsigned long vm_object_collapse_calls = 0;
 static unsigned long vm_object_collapse_objects = 0;
 static unsigned long vm_object_collapse_do_collapse = 0;
 static unsigned long vm_object_collapse_do_bypass = 0;
-static unsigned long vm_object_collapse_delays = 0;
+
 __private_extern__ void
 vm_object_collapse(
        register vm_object_t                    object,
@@ -5003,11 +5003,11 @@ retry:
                 */
                size = atop(object->vo_size);
                rcount = object->resident_page_count;
+
                if (rcount != size) {
                        vm_object_offset_t      offset;
                        vm_object_offset_t      backing_offset;
                        unsigned int            backing_rcount;
-                       unsigned int            lookups = 0;
 
                        /*
                         *      If the backing object has a pager but no pagemap,
@@ -5047,6 +5047,24 @@ retry:
                                continue;
                        }
 
+                       backing_offset = object->vo_shadow_offset;
+                       backing_rcount = backing_object->resident_page_count;
+
+                       if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+                               /*
+                                * We have enough pages in the backing object to guarantee
+                                * that at least one of them must be 'uncovered' by a
+                                * resident page in the object we're evaluating, so move on
+                                * and try to collapse the rest of the shadow chain.
+                                */
+                                if (object != original_object) {
+                                        vm_object_unlock(object);
+                                }
+                                object = backing_object;
+                                object_lock_type = backing_object_lock_type;
+                                continue;
+                       }
+
                        /*
                         *      If all of the pages in the backing object are
                         *      shadowed by the parent object, the parent
@@ -5060,17 +5078,14 @@ retry:
                         *
                         */
 
-                       backing_offset = object->vo_shadow_offset;
-                       backing_rcount = backing_object->resident_page_count;
-
 #if    MACH_PAGEMAP
 #define EXISTS_IN_OBJECT(obj, off, rc) \
        (vm_external_state_get((obj)->existence_map, \
         (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
-        ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+        ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
 #else
 #define EXISTS_IN_OBJECT(obj, off, rc) \
-       (((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+       (((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
 #endif /* MACH_PAGEMAP */
 
                        /*
@@ -5103,36 +5118,23 @@ retry:
                         * pages in the backing object, it makes sense to
                         * walk the backing_object's resident pages first.
                         *
-                        * NOTE: Pages may be in both the existence map and 
-                        * resident.  So, we can't permanently decrement
-                        * the rcount here because the second loop may
-                        * find the same pages in the backing object'
-                        * existence map that we found here and we would
-                        * double-decrement the rcount.  We also may or
-                        * may not have found the 
+                        * NOTE: Pages may be both in the existence map and
+                        * resident, so if we don't find a dependency while
+                        * walking the backing object's resident page list
+                        * directly, and there is an existence map, we'll have
+                        * to run the offset-based 2nd pass.  Because we may
+                        * have to run both passes, we need to be careful
+                        * not to decrement 'rcount' in the 1st pass.
                         */
-                       if (backing_rcount && 
-#if    MACH_PAGEMAP
-                           size > ((backing_object->existence_map) ?
-                            backing_rcount : (backing_rcount >> 1))
-#else
-                           size > (backing_rcount >> 1)
-#endif /* MACH_PAGEMAP */
-                               ) {
+                       if (backing_rcount && backing_rcount < (size / 8)) {
                                unsigned int rc = rcount;
                                vm_page_t p;
 
                                backing_rcount = backing_object->resident_page_count;
                                p = (vm_page_t)queue_first(&backing_object->memq);
                                do {
-                                       /* Until we get more than one lookup lock */
-                                       if (lookups > 256) {
-                                               vm_object_collapse_delays++;
-                                               lookups = 0;
-                                               mutex_pause(0);
-                                       }
-
                                        offset = (p->offset - backing_offset);
+
                                        if (offset < object->vo_size &&
                                            offset != hint_offset &&
                                            !EXISTS_IN_OBJECT(object, offset, rc)) {
@@ -5144,6 +5146,7 @@ retry:
                                        p = (vm_page_t) queue_next(&p->listq);
 
                                } while (--backing_rcount);
+
                                if (backing_rcount != 0 ) {
                                        /* try and collapse the rest of the shadow chain */
                                        if (object != original_object) {
@@ -5170,13 +5173,6 @@ retry:
                                      (offset + PAGE_SIZE_64 < object->vo_size) ?
                                      (offset + PAGE_SIZE_64) : 0) != hint_offset) {
 
-                                       /* Until we get more than one lookup lock */
-                                       if (lookups > 256) {
-                                               vm_object_collapse_delays++;
-                                               lookups = 0;
-                                               mutex_pause(0);
-                                       }
-
                                        if (EXISTS_IN_OBJECT(backing_object, offset +
                                            backing_offset, backing_rcount) &&
                                            !EXISTS_IN_OBJECT(object, offset, rcount)) {