git.saurik.com Git - apple/xnu.git/commitdiff
xnu-2422.115.4.tar.gz  os-x-1095  v2422.115.4
author    Apple <opensource@apple.com>  Tue, 23 Sep 2014 21:31:05 +0000 (21:31 +0000)
committer Apple <opensource@apple.com>  Tue, 23 Sep 2014 21:31:05 +0000 (21:31 +0000)
13 files changed:
config/IOKit.x86_64.exports
config/MasterVersion
iokit/IOKit/IOSharedDataQueue.h
iokit/Kernel/IODataQueue.cpp
iokit/Kernel/IOSharedDataQueue.cpp
osfmk/i386/acpi.c
osfmk/i386/machine_routines.c
osfmk/i386/machine_routines.h
osfmk/i386/mp_desc.c
osfmk/i386/pmap.h
osfmk/kern/telemetry.c
osfmk/x86_64/pmap.c
tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.m

config/IOKit.x86_64.exports
index 012166184994000d3761c20de99ce514a0a77c59..217b17b013348bd17efca036648f10c821ede8b9 100644 (file)
@@ -205,6 +205,8 @@ __ZN17IOPolledInterface28_RESERVEDIOPolledInterface13Ev
 __ZN17IOPolledInterface28_RESERVEDIOPolledInterface14Ev
 __ZN17IOPolledInterface28_RESERVEDIOPolledInterface15Ev
 __ZN17IOSharedDataQueue11withEntriesEjj
+__ZN17IOSharedDataQueue12getQueueSizeEv
+__ZN17IOSharedDataQueue12setQueueSizeEj
 __ZN17IOSharedDataQueue12withCapacityEj
 __ZN17IOSharedDataQueue16initWithCapacityEj
 __ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue0Ev
@@ -216,6 +218,7 @@ __ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue5Ev
 __ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue6Ev
 __ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue7Ev
 __ZN17IOSharedDataQueue7dequeueEPvPj
+__ZN17IOSharedDataQueue7enqueueEPvj
 __ZN18IOMemoryDescriptor10setMappingEP4taskyj
 __ZN18IOMemoryDescriptor10writeBytesEyPKvy
 __ZN18IOMemoryDescriptor11makeMappingEPS_P4taskyjyy
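Demangled, the three added exports read as follows (a readability sketch; the authoritative declarations appear in the IOSharedDataQueue.h diff below):

    // Demangled view of the new exports (readability sketch only):
    UInt32  IOSharedDataQueue::getQueueSize();                        // __ZN17IOSharedDataQueue12getQueueSizeEv
    Boolean IOSharedDataQueue::setQueueSize(UInt32 size);             // __ZN17IOSharedDataQueue12setQueueSizeEj
    Boolean IOSharedDataQueue::enqueue(void *data, UInt32 dataSize);  // __ZN17IOSharedDataQueue7enqueueEPvj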
config/MasterVersion
index 8fcb30c965ed71820041a1f6c345544268d51c5b..14d9aa5e488c8de4ee859e9fd739a0b96a8889b1 100644 (file)
@@ -1,4 +1,4 @@
-13.3.0
+13.4.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
iokit/IOKit/IOSharedDataQueue.h
index fdfa1d6733eec388c4d9cb390f52b4b7b4ac7822..a31cfc965bbace9680f1e5b06ee3a4c911d5290b 100644 (file)
@@ -53,6 +53,7 @@ class IOSharedDataQueue : public IODataQueue
     OSDeclareDefaultStructors(IOSharedDataQueue)
 
     struct ExpansionData { 
+        UInt32 queueSize;
     };
     /*! @var reserved
         Reserved for future use.  (Internal use only)  */
@@ -61,6 +62,9 @@ class IOSharedDataQueue : public IODataQueue
 protected:
     virtual void free();
 
+    UInt32 getQueueSize();
+    Boolean setQueueSize(UInt32 size);
+
 public:
     /*!
      * @function withCapacity
@@ -116,6 +120,16 @@ public:
      */
     virtual Boolean dequeue(void *data, UInt32 *dataSize);
 
+    /*!
+     * @function enqueue
+     * @abstract Enqueues a new entry on the queue.
+     * @discussion This method adds a new data entry of dataSize to the queue.  It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue.  Once that is done, it moves the tail to the next available location.  When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.<br>  If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available.
+     * @param data Pointer to the data to be added to the queue.
+     * @param dataSize Size of the data pointed to by data.
+     * @result Returns true on success and false on failure.  Typically failure means that the queue is full.
+     */
+    virtual Boolean enqueue(void *data, UInt32 dataSize);
+
     OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 0);
     OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 1);
     OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 2);
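Together with withEntries()/withCapacity(), the new enqueue() override gives a kext a bounded producer path into shared memory. A minimal driver-side sketch follows; MyDriver-style plumbing, the record layout, and the capacity are illustrative assumptions, only the IOSharedDataQueue calls are real API:

    #include <IOKit/IOSharedDataQueue.h>

    // Record layout shared with the user client (illustrative).
    struct SampleRecord {
        uint64_t timestamp;
        uint32_t value;
    };

    static IOSharedDataQueue *gSampleQueue;

    // Typically called from the driver's start() routine.
    static bool setupSampleQueue()
    {
        // Room for 64 records; withEntries() now rejects sizes whose
        // (numEntries + 1) * (entrySize + header) product would overflow.
        gSampleQueue = IOSharedDataQueue::withEntries(64, sizeof(SampleRecord));
        return gSampleQueue != NULL;
    }

    static bool postSample(uint64_t ts, uint32_t v)
    {
        SampleRecord rec = { ts, v };
        // enqueue() copies the record in behind the new bounds checks and,
        // per the discussion above, notifies the user process if the queue
        // was empty. False typically means the queue is full.
        return gSampleQueue != NULL && gSampleQueue->enqueue(&rec, sizeof(rec));
    }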
iokit/Kernel/IODataQueue.cpp
index 84f88322ecfa8b270b1168dfe3a3e422afdee626..1dd0c82a38b2a02fb6ded999c8b1161554a0e3e3 100644 (file)
@@ -30,6 +30,7 @@
 #include <IOKit/IODataQueueShared.h>
 #include <IOKit/IOLib.h>
 #include <IOKit/IOMemoryDescriptor.h>
+#include <libkern/OSAtomic.h>
 
 #ifdef enqueue
 #undef enqueue
@@ -79,6 +80,10 @@ Boolean IODataQueue::initWithCapacity(UInt32 size)
         return false;
     }
 
+    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
+        return false;
+    }
+    
     allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);
 
     if (allocSize < size) {
@@ -99,6 +104,16 @@ Boolean IODataQueue::initWithCapacity(UInt32 size)
 
 Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
 {
+    // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
+    //  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
+    if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+        //  check (numEntries + 1)
+        (numEntries > UINT32_MAX-1) ||
+        //  check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
+        (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX/(numEntries+1))) {
+        return false;
+    }
+    
     return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize)));
 }
 
@@ -120,10 +135,20 @@ Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
     const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
     IODataQueueEntry * entry;
 
+    // Check for overflow of entrySize
+    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
+        return false;
+    }
+    // Check for underflow of (dataQueue->queueSize - tail)
+    if (dataQueue->queueSize < tail) {
+        return false;
+    }
+
     if ( tail >= head )
     {
         // Is there enough room at the end for the entry?
-        if ( (tail + entrySize) <= dataQueue->queueSize )
+        if ((entrySize <= UINT32_MAX - tail) &&
+            ((tail + entrySize) <= dataQueue->queueSize) )
         {
             entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
 
@@ -133,8 +158,8 @@ Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
             // The tail can be out of bound when the size of the new entry
             // exactly matches the available space at the end of the queue.
             // The tail can range from 0 to dataQueue->queueSize inclusive.
-
-            dataQueue->tail += entrySize;
+            
+            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
         }
         else if ( head > entrySize )   // Is there enough room at the beginning?
         {
@@ -153,7 +178,7 @@ Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
             }
 
             memcpy(&dataQueue->queue->data, data, dataSize);
-            dataQueue->tail = entrySize;
+            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
         }
         else
         {
@@ -171,7 +196,7 @@ Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
 
             entry->size = dataSize;
             memcpy(&entry->data, data, dataSize);
-            dataQueue->tail += entrySize;
+            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
         }
         else
         {
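Each of the new checks in initWithEntries() guards one arithmetic step of (numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize). Isolated as a stand-alone helper, the pattern looks like this (a sketch; ENTRY_HEADER_SIZE is a stand-in for DATA_QUEUE_ENTRY_HEADER_SIZE):

    #include <stdbool.h>
    #include <stdint.h>

    #define ENTRY_HEADER_SIZE 4u   /* stand-in for DATA_QUEUE_ENTRY_HEADER_SIZE */

    static bool checked_queue_bytes(uint32_t numEntries, uint32_t entrySize,
                                    uint32_t *outBytes)
    {
        /* entrySize + header must not wrap ... */
        if (entrySize > UINT32_MAX - ENTRY_HEADER_SIZE)
            return false;
        /* ... nor numEntries + 1 ... */
        if (numEntries > UINT32_MAX - 1)
            return false;
        /* ... nor the product of the two. */
        uint32_t perEntry = entrySize + ENTRY_HEADER_SIZE;
        if (perEntry > UINT32_MAX / (numEntries + 1))
            return false;
        *outBytes = (numEntries + 1) * perEntry;
        return true;
    }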
iokit/Kernel/IOSharedDataQueue.cpp
index 7fb3af426b5886109ae9a9369446a3ef6f4678b4..5eb1c35ed3eb6870ae22d3ab404f514af6df4877 100644 (file)
 #include <IOKit/IOLib.h>
 #include <IOKit/IOMemoryDescriptor.h>
 
+#ifdef enqueue
+#undef enqueue
+#endif
+
 #ifdef dequeue
 #undef dequeue
 #endif
@@ -70,12 +74,28 @@ IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entr
 Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
 {
     IODataQueueAppendix *   appendix;
+    vm_size_t               allocSize;
     
     if (!super::init()) {
         return false;
     }
     
-    dataQueue = (IODataQueueMemory *)IOMallocAligned(round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE), PAGE_SIZE);
+    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
+    if (!_reserved) {
+        return false;
+    }
+    
+    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
+        return false;
+    }
+    
+    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);
+    
+    if (allocSize < size) {
+        return false;
+    }
+    
+    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
     if (dataQueue == 0) {
         return false;
     }
@@ -84,6 +104,10 @@ Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
     dataQueue->head         = 0;
     dataQueue->tail         = 0;
     
+    if (!setQueueSize(size)) {
+        return false;
+    }
+    
     appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
     appendix->version   = 0;
     notifyMsg           = &(appendix->msgh);
@@ -95,10 +119,15 @@ Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
 void IOSharedDataQueue::free()
 {
     if (dataQueue) {
-        IOFreeAligned(dataQueue, round_page(dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
+        IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
         dataQueue = NULL;
     }
 
+    if (_reserved) {
+        IOFree (_reserved, sizeof(struct ExpansionData));
+        _reserved = NULL;
+    }
+    
     super::free();
 }
 
@@ -107,7 +136,7 @@ IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
     IOMemoryDescriptor *descriptor = 0;
 
     if (dataQueue != 0) {
-        descriptor = IOMemoryDescriptor::withAddress(dataQueue, dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
+        descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
     }
 
     return descriptor;
@@ -122,8 +151,12 @@ IODataQueueEntry * IOSharedDataQueue::peek()
         IODataQueueEntry *  head               = 0;
         UInt32              headSize    = 0;
         UInt32              headOffset  = dataQueue->head;
-        UInt32              queueSize   = dataQueue->queueSize;
+        UInt32              queueSize   = getQueueSize();
 
+        if (headOffset >= queueSize) {
+            return NULL;
+        }
+        
         head           = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
         headSize       = head->size;
         
@@ -131,10 +164,13 @@ IODataQueueEntry * IOSharedDataQueue::peek()
         // If there is room, check if there's enough room to hold the header and
         // the data.
 
-        if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
-            ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize))
-        {
+        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
+            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
+            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
             // No room for the header or the data, wrap to the beginning of the queue.
+            // Note: wrapping even with the UINT32_MAX checks, as we have to support
+            // queueSize of UINT32_MAX
             entry = dataQueue->queue;
         } else {
             entry = head;
@@ -144,6 +180,93 @@ IODataQueueEntry * IOSharedDataQueue::peek()
     return entry;
 }
 
+Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
+{
+    const UInt32       head      = dataQueue->head;  // volatile
+    const UInt32       tail      = dataQueue->tail;
+    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
+    IODataQueueEntry * entry;
+    
+    // Check for overflow of entrySize
+    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
+        return false;
+    }
+    // Check for underflow of (getQueueSize() - tail)
+    if (getQueueSize() < tail) {
+        return false;
+    }
+    
+    if ( tail >= head )
+    {
+        // Is there enough room at the end for the entry?
+        if ((entrySize <= UINT32_MAX - tail) &&
+            ((tail + entrySize) <= getQueueSize()) )
+        {
+            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
+            
+            entry->size = dataSize;
+            memcpy(&entry->data, data, dataSize);
+            
+            // The tail can be out of bound when the size of the new entry
+            // exactly matches the available space at the end of the queue.
+            // The tail can range from 0 to dataQueue->queueSize inclusive.
+            
+            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
+        }
+        else if ( head > entrySize )     // Is there enough room at the beginning?
+        {
+            // Wrap around to the beginning, but do not allow the tail to catch
+            // up to the head.
+            
+            dataQueue->queue->size = dataSize;
+            
+            // We need to make sure that there is enough room to set the size before
+            // doing this. The user client checks for this and will look for the size
+            // at the beginning if there isn't room for it at the end.
+            
+            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
+            {
+                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
+            }
+            
+            memcpy(&dataQueue->queue->data, data, dataSize);
+            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
+        }
+        else
+        {
+            return false;    // queue is full
+        }
+    }
+    else
+    {
+        // Do not allow the tail to catch up to the head when the queue is full.
+        // That's why the comparison uses a '>' rather than '>='.
+        
+        if ( (head - tail) > entrySize )
+        {
+            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
+            
+            entry->size = dataSize;
+            memcpy(&entry->data, data, dataSize);
+            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
+        }
+        else
+        {
+            return false;    // queue is full
+        }
+    }
+    
+    // Send notification (via mach message) that data is available.
+    
+    if ( ( head == tail )                                                   /* queue was empty prior to enqueue() */
+        ||   ( dataQueue->head == tail ) )   /* queue was emptied during enqueue() */
+    {
+        sendDataAvailableNotification();
+    }
+    
+    return true;
+}
+
 Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
 {
     Boolean             retVal          = TRUE;
@@ -156,23 +279,40 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
             IODataQueueEntry *  head           = 0;
             UInt32              headSize    = 0;
             UInt32              headOffset  = dataQueue->head;
-            UInt32              queueSize   = dataQueue->queueSize;
+            UInt32              queueSize   = getQueueSize();
 
+            if (headOffset > queueSize) {
+                return false;
+            }
+            
             head               = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
             headSize   = head->size;
             
-            // we wraped around to beginning, so read from there
-                       // either there was not even room for the header
-                       if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
-                               // or there was room for the header, but not for the data
-                               ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize)) {
+            // we wrapped around to beginning, so read from there
+            // either there was not even room for the header
+            if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+                (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
+                // or there was room for the header, but not for the data
+                (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
+                (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
+                // Note: we have to wrap to the beginning even with the UINT32_MAX checks
+                // because we have to support a queueSize of UINT32_MAX.
                 entry           = dataQueue->queue;
                 entrySize       = entry->size;
+                if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+                    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
+                    return false;
+                }
                 newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
-            // else it is at the end
+                // else it is at the end
             } else {
                 entry           = head;
                 entrySize       = entry->size;
+                if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+                    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
+                    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
+                    return false;
+                }
                 newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
             }
         }
@@ -182,7 +322,7 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
                 if (dataSize) {
                     if (entrySize <= *dataSize) {
                         memcpy(data, &(entry->data), entrySize);
-                        dataQueue->head = newHeadOffset;
+                        OSCompareAndSwap( dataQueue->head, newHeadOffset, (SInt32 *)&dataQueue->head);
                     } else {
                         retVal = FALSE;
                     }
@@ -190,7 +330,7 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
                     retVal = FALSE;
                 }
             } else {
-                dataQueue->head = newHeadOffset;
+                OSCompareAndSwap( dataQueue->head, newHeadOffset, (SInt32 *)&dataQueue->head);
             }
 
             if (dataSize) {
@@ -206,6 +346,22 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
     return retVal;
 }
 
+UInt32 IOSharedDataQueue::getQueueSize()
+{
+    if (!_reserved) {
+        return 0;
+    }
+    return _reserved->queueSize;
+}
+
+Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
+{
+    if (!_reserved) {
+        return false;
+    }
+    _reserved->queueSize = size;
+    return true;
+}
 
 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
 OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
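On the user side, the same shared memory is consumed through the public IODataQueueClient calls. A hedged sketch of a drain loop follows; how the queue was obtained and mapped (IOConnectMapMemory64 against a specific user client) is driver-specific and elided:

    #include <IOKit/IODataQueueClient.h>
    #include <IOKit/IOKitLib.h>

    static void drainQueue(IODataQueueMemory *queue)
    {
        uint8_t  buf[256];
        uint32_t size;

        while (IODataQueueDataAvailable(queue)) {
            size = sizeof(buf);
            /* Mirrors the kernel dequeue(): copies out one entry and
             * advances head under the same wrap rules patched above. */
            if (IODataQueueDequeue(queue, buf, &size) != kIOReturnSuccess)
                break;
            /* ... process `size` bytes of buf ... */
        }
    }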
osfmk/i386/acpi.c
index bccc305cff30280a5f0aabca3e9053f2e840112b..fd73f24ff8462de4da16539ea74e21808bd8a8f0 100644 (file)
@@ -95,7 +95,7 @@ typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
 unsigned int           save_kdebug_enable = 0;
 static uint64_t                acpi_sleep_abstime;
 static uint64_t                acpi_idle_abstime;
-static uint64_t                acpi_wake_abstime;
+static uint64_t                acpi_wake_abstime, acpi_wake_postrebase_abstime;
 boolean_t              deep_idle_rebase = TRUE;
 
 #if CONFIG_SLEEP
@@ -292,7 +292,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
 
        /* let the realtime clock reset */
        rtc_sleep_wakeup(acpi_sleep_abstime);
-
+       acpi_wake_postrebase_abstime = mach_absolute_time();
        kdebug_enable = save_kdebug_enable;
 
        if (kdebug_enable == 0) {
@@ -419,7 +419,7 @@ acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
                rtc_sleep_wakeup(acpi_idle_abstime);
                kdebug_enable = save_kdebug_enable;
        }
-
+       acpi_wake_postrebase_abstime = mach_absolute_time();
        cpu_datap(master_cpu)->cpu_running = TRUE;
 
        KERNEL_DEBUG_CONSTANT(
@@ -467,3 +467,9 @@ install_real_mode_bootstrap(void *prot_entry)
        __asm__("wbinvd");
 }
 
+boolean_t
+ml_recent_wake(void) {
+       uint64_t ctime = mach_absolute_time();
+       assert(ctime > acpi_wake_postrebase_abstime);
+       return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
+}
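ml_recent_wake() compares a raw mach_absolute_time() delta against 5 * NSEC_PER_SEC, which works in the kernel because x86 abstime ticks in nanoseconds. A user-space analog (illustrative sketch, not from this commit) would scale through the timebase first:

    #include <mach/mach_time.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool recent_event(uint64_t event_abstime)
    {
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);

        /* Scale ticks to nanoseconds; the multiply can overflow for very
         * large deltas, which is fine for short intervals like this one. */
        uint64_t delta_ns =
            (mach_absolute_time() - event_abstime) * tb.numer / tb.denom;
        return delta_ns < 5ULL * 1000000000ULL;   /* 5 * NSEC_PER_SEC */
    }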
osfmk/i386/machine_routines.c
index 06c57561c6401af357db22da96236f2a68d89799..d958d8c2e0016ae3cd62151d09be389474fdcd7e 100644 (file)
@@ -603,7 +603,11 @@ ml_init_lock_timeout(void)
 {
        uint64_t        abstime;
        uint32_t        mtxspin;
+#if DEVELOPMENT || DEBUG
        uint64_t        default_timeout_ns = NSEC_PER_SEC>>2;
+#else
+       uint64_t        default_timeout_ns = NSEC_PER_SEC>>1;
+#endif
        uint32_t        slto;
        uint32_t        prt;
 
@@ -769,7 +773,7 @@ kernel_preempt_check(void)
 }
 
 boolean_t machine_timeout_suspended(void) {
-       return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity());
+       return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
 }
 
 /* Eagerly evaluate all pending timer and thread callouts
osfmk/i386/machine_routines.h
index f0c1baa710287754d717be766fd7327e492c672a..eeb451caf1ecf547e78eab2fb8fbf9532c2419e9 100644 (file)
@@ -351,5 +351,6 @@ boolean_t ml_timer_forced_evaluation(void);
 int ml_timer_get_user_idle_level(void);
 kern_return_t ml_timer_set_user_idle_level(int);
 
+boolean_t ml_recent_wake(void);
 #endif /* XNU_KERNEL_PRIVATE */
 #endif /* _I386_MACHINE_ROUTINES_H_ */
osfmk/i386/mp_desc.c
index b1bc88bf60ea31ae8d1e004f881f3243dddb115b..d4fd11af184e6476bfb6ae8fdf98df30d435b2ad 100644 (file)
@@ -72,7 +72,7 @@
 #include <i386/misc_protos.h>
 #include <i386/mp.h>
 #include <i386/pmap.h>
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
 #include <i386/pmap_internal.h>
 #endif /* i386 */
 #if CONFIG_MCA
@@ -389,6 +389,26 @@ fix_desc64(void *descp, int count)
        }
 }
 
+static void
+cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
+{
+       pt_entry_t *pte = NULL;
+
+       /* Require page alignment */
+       assert(page_aligned(gdt));
+       assert(page_aligned(alias));
+
+       pte = pmap_pte(kernel_pmap, alias);
+       pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
+                                         | INTEL_PTE_MOD
+                                         | INTEL_PTE_WIRED
+                                         | INTEL_PTE_VALID
+                                         | INTEL_PTE_WRITE
+                                         | INTEL_PTE_NX);
+
+       /* TLB flush unnecessary because target processor isn't running yet */
+}
+
 
 void
 cpu_desc_init64(cpu_data_t *cdp)
@@ -430,19 +450,25 @@ cpu_desc_init64(cpu_data_t *cdp)
                master_ktss64.ist1 = (uintptr_t) low_eintstack
                                        - sizeof(x86_64_intr_stack_frame_t);
 
-       } else {
+       } else if (cdi->cdi_ktss == NULL) {     /* Skipping re-init on wake */
                cpu_desc_table64_t      *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
+
                /*
                 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel 
                 * heap (cpu_desc_table). 
                 * LDT descriptors are mapped into a separate area.
+                * GDT descriptors are addressed by alias to avoid sgdt leaks to user-space.
                 */
                cdi->cdi_idt.ptr  = (void *)MASTER_IDT_ALIAS;
-               cdi->cdi_gdt.ptr  = (struct fake_descriptor *)cdt->gdt;
+               cdi->cdi_gdt.ptr  = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
                cdi->cdi_ktss = (void *)&cdt->ktss;
                cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
                cdi->cdi_ldt  = cdp->cpu_ldtp;
 
+               /* Make the virtual alias address for the GDT */
+               cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
+                             (vm_map_offset_t) cdi->cdi_gdt.ptr);
+
                /*
                 * Copy the tables
                 */
@@ -487,6 +513,17 @@ cpu_desc_load64(cpu_data_t *cdp)
 {
        cpu_desc_index_t        *cdi = &cdp->cpu_desc_index;
 
+       /* Stuff the kernel per-cpu data area address into the MSRs */
+       wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
+       wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
+
+       /*
+        * Ensure the TSS segment's busy bit is clear. This is required
+        * for the case of reloading descriptors at wake to avoid
+        * their complete re-initialization.
+        */
+       gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;
+
        /* Load the GDT, LDT, IDT and TSS */
        cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
        cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
@@ -495,10 +532,6 @@ cpu_desc_load64(cpu_data_t *cdp)
        lldt(KERNEL_LDT);
        set_tr(KERNEL_TSS);
 
-       /* Stuff the kernel per-cpu data area address into the MSRs */
-       wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
-       wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
-
 #if GPROF // Hack to enable mcount to work on K64
        __asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS)));
 #endif
osfmk/i386/pmap.h
index 5e786acbe48cbb17e02eadd72b54acaf709f82e7..bccf937c6edb61b263146e14fbb2620ea552f636 100644 (file)
@@ -375,14 +375,13 @@ static    inline void * PHYSMAP_PTOV_check(void *paddr) {
 /*
  * For KASLR, we alias the master processor's IDT and GDT at fixed
  * virtual addresses to defeat SIDT/SGDT address leakage.
+ * Non-boot processors' GDT aliases are mapped likewise (skipping LOWGLOBAL_ALIAS).
+ * The low global vector page is mapped at a fixed alias also.
  */
 #define MASTER_IDT_ALIAS       (VM_MIN_KERNEL_ADDRESS + 0x0000)
 #define MASTER_GDT_ALIAS       (VM_MIN_KERNEL_ADDRESS + 0x1000)
-
-/*
- * The low global vector page is mapped at a fixed alias also.
- */
 #define LOWGLOBAL_ALIAS                (VM_MIN_KERNEL_ADDRESS + 0x2000)
+#define CPU_GDT_ALIAS(_cpu)    (LOWGLOBAL_ALIAS + (0x1000*(_cpu)))
 
 #endif /*__x86_64__ */
 
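With these macros the fixed alias pages line up one per page. A worked example, assuming the usual x86_64 value VM_MIN_KERNEL_ADDRESS == 0xFFFFFF8000000000ULL:

    /*   MASTER_IDT_ALIAS   0xFFFFFF8000000000
     *   MASTER_GDT_ALIAS   0xFFFFFF8000001000   boot CPU's GDT
     *   LOWGLOBAL_ALIAS    0xFFFFFF8000002000   low global vector page
     *   CPU_GDT_ALIAS(1)   0xFFFFFF8000003000   first non-boot CPU
     *   CPU_GDT_ALIAS(2)   0xFFFFFF8000004000   ...one page per CPU
     *
     * Non-boot GDT aliases occupy consecutive pages after the low-global
     * page; pmap_lowmem_finalize() (in the x86_64/pmap.c hunk below) still
     * removes those mappings at boot, and cpu_gdt_alias() re-creates each
     * one when that CPU's descriptor tables are set up. */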
osfmk/kern/telemetry.c
index 724f8a2af6e036ba87cd4b1fa4bf67d60bcc4682..15b0254183921213a1ed86f0f07f31af7dedef4c 100644 (file)
@@ -73,6 +73,7 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags);
 #define TELEMETRY_MAX_BUFFER_SIZE (64*1024)
 
 #define        TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
+#define        TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication
 
 uint32_t                       telemetry_sample_rate = 0;
 volatile boolean_t     telemetry_needs_record = FALSE;
@@ -421,6 +422,14 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags)
                uuid_info_count = 0;
        }
 
+       /*
+        * Don't copy in an unbounded amount of memory. The main binary and interesting
+        * non-shared-cache libraries should be in the first few images.
+        */
+       if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
+               uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
+       }
+
        uint32_t uuid_info_size = (uint32_t)(task_has_64BitAddr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
        uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
        char     *uuid_info_array = NULL;
@@ -483,6 +492,10 @@ copytobuffer:
                 */
                telemetry_buffer_end_point = current_record_start;
                telemetry_buffer_current_position = 0;
+               if (current_record_start == 0) {
+                       /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                       goto cancel_sample;
+               }
                goto copytobuffer;
        }
 
@@ -499,6 +512,10 @@ copytobuffer:
        if ((telemetry_buffer_size - telemetry_buffer_current_position) < sizeof(struct task_snapshot)) {
                telemetry_buffer_end_point = current_record_start;
                telemetry_buffer_current_position = 0;
+               if (current_record_start == 0) {
+                       /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                       goto cancel_sample;
+               }
                goto copytobuffer;
        }
 
@@ -567,6 +584,10 @@ copytobuffer:
        if ((telemetry_buffer_size - telemetry_buffer_current_position) < uuid_info_array_size) {
                telemetry_buffer_end_point = current_record_start;
                telemetry_buffer_current_position = 0;
+               if (current_record_start == 0) {
+                       /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                       goto cancel_sample;
+               }
                goto copytobuffer;
        }
 
@@ -588,6 +609,10 @@ copytobuffer:
                /* wrap and overwrite */
                telemetry_buffer_end_point = current_record_start;              
                telemetry_buffer_current_position = 0;
+               if (current_record_start == 0) {
+                       /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                       goto cancel_sample;
+               }
                goto copytobuffer;
        }
 
@@ -627,6 +652,10 @@ copytobuffer:
                        /* wrap and overwrite */
                        telemetry_buffer_end_point = current_record_start;              
                        telemetry_buffer_current_position = 0;
+                       if (current_record_start == 0) {
+                               /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                               goto cancel_sample;
+                       }
                        goto copytobuffer;
                }
 
@@ -651,6 +680,10 @@ copytobuffer:
        if ((telemetry_buffer_size - telemetry_buffer_current_position)/framesize < btcount) {
                telemetry_buffer_end_point = current_record_start;
                telemetry_buffer_current_position = 0;
+               if (current_record_start == 0) {
+                       /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+                       goto cancel_sample;
+               }
                goto copytobuffer;
        }
 
@@ -679,6 +712,8 @@ copytobuffer:
                notify = TRUE;
        }
 
+cancel_sample:
+
        TELEMETRY_UNLOCK();
 
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END, notify, telemetry_bytes_since_last_mark, telemetry_buffer_current_position, telemetry_buffer_end_point, 0);
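Every copy step in telemetry_take_sample() now follows the same wrap-or-cancel discipline ending at the new cancel_sample label. Condensed into one helper the pattern looks like this (a sketch with illustrative field names; the kernel restarts the whole record via goto copytobuffer rather than a recursive call):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct ring {
        uint8_t  *buf;
        uint32_t  size;       /* total buffer size */
        uint32_t  pos;        /* current write position */
        uint32_t  end_point;  /* where valid data ends after a wrap */
    };

    static bool ring_append(struct ring *r, uint32_t record_start,
                            const void *src, uint32_t len)
    {
        if (r->size - r->pos < len) {
            /* Not enough room before the end: truncate at the start of
             * this record and wrap to offset 0 ... */
            r->end_point = record_start;
            r->pos = 0;
            /* ... unless the record already started at 0, in which case
             * it can never fit: cancel the sample (the new check). */
            if (record_start == 0)
                return false;
            return ring_append(r, 0, src, len);  /* retry from the front */
        }
        memcpy(r->buf + r->pos, src, len);
        r->pos += len;
        return true;
    }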
osfmk/x86_64/pmap.c
index 32d345f9c2ba4863c86181a0a54b46afe54ac252..e9d0157eff67061d34356bceb44cdc191b5c8148 100644 (file)
@@ -911,7 +911,10 @@ pmap_lowmem_finalize(void)
         */
        DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
 
-       /* Remove all mappings past the descriptor aliases and low globals */
+       /*
+        * Remove all mappings past the boot-cpu descriptor aliases and low globals.
+        * Non-boot-cpu GDT aliases will be remapped later as needed. 
+        */
        pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
 
        /*
tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.m
index 64fe2cb76618e583d9c08df835955baafb08faca..3cba388d9c275753b697639b3dd20c5dcb70e255 100644 (file)
@@ -9,12 +9,9 @@
 
 #import <Foundation/Foundation.h>
 
-//#include "/Volumes/ws/10864999/Sundance10A273/Libc-849/BUILD_arm/Libc_Libc-849.roots/Libc_Libc-849~hdrDst/usr/local/include/libproc_internal.h"
 #include <mach/message.h>
 #include <libproc_internal.h>
 
-//#include "/Volumes/ws/10864999/Sundance10A273/Libc-849/BUILD_arm/Libc_Libc-849.roots/Libc_Libc-849~hdrDst/usr/local/include/libproc_internal.h"
-
 #define        MAX_THREADS 1000
 
 char *pname;