__ZN17IOPolledInterface28_RESERVEDIOPolledInterface14Ev
__ZN17IOPolledInterface28_RESERVEDIOPolledInterface15Ev
__ZN17IOSharedDataQueue11withEntriesEjj
+__ZN17IOSharedDataQueue12getQueueSizeEv
+__ZN17IOSharedDataQueue12setQueueSizeEj
__ZN17IOSharedDataQueue12withCapacityEj
__ZN17IOSharedDataQueue16initWithCapacityEj
__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue0Ev
__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue6Ev
__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue7Ev
__ZN17IOSharedDataQueue7dequeueEPvPj
+__ZN17IOSharedDataQueue7enqueueEPvj
__ZN18IOMemoryDescriptor10setMappingEP4taskyj
__ZN18IOMemoryDescriptor10writeBytesEyPKvy
__ZN18IOMemoryDescriptor11makeMappingEPS_P4taskyjyy
-13.3.0
+13.4.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
OSDeclareDefaultStructors(IOSharedDataQueue)
struct ExpansionData {
+ UInt32 queueSize;
};
/*! @var reserved
Reserved for future use. (Internal use only) */
protected:
virtual void free();
+ UInt32 getQueueSize();
+ Boolean setQueueSize(UInt32 size);
+
public:
/*!
* @function withCapacity
*/
virtual Boolean dequeue(void *data, UInt32 *dataSize);
+ /*!
+ * @function enqueue
+ * @abstract Enqueues a new entry on the queue.
+ * @discussion This method adds a new data entry of dataSize to the queue. It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.<br> If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available.
+ * @param data Pointer to the data to be added to the queue.
+ * @param dataSize Size of the data pointed to by data.
+ * @result Returns true on success and false on failure. Typically failure means that the queue is full.
+ */
+ virtual Boolean enqueue(void *data, UInt32 dataSize);
+
OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 2);
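A minimal kernel-side usage sketch for the enqueue() interface documented above. The SampleT payload type, kNumEntries, and clientPort are hypothetical placeholders; withEntries(), setNotificationPort(), getMemoryDescriptor(), and enqueue() are the existing IOSharedDataQueue/IODataQueue calls.

// Sketch only: a driver producing entries into an IOSharedDataQueue.
IOSharedDataQueue *queue = IOSharedDataQueue::withEntries(kNumEntries, sizeof(SampleT));
if (queue) {
    queue->setNotificationPort(clientPort);                 // port registered by the user client
    IOMemoryDescriptor *md = queue->getMemoryDescriptor();  // mapped into the client task elsewhere
    SampleT sample = { /* ... */ };
    if (!queue->enqueue(&sample, sizeof(sample))) {
        // false typically means the queue is full; drop or retry later
    }
}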
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
+#include <libkern/OSAtomic.h>
#ifdef enqueue
#undef enqueue
return false;
}
+ if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
+ return false;
+ }
+
allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);
if (allocSize < size) {
Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
+ // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
+ // check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
+ if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+ // check (numEntries + 1)
+ (numEntries > UINT32_MAX-1) ||
+ // check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
+ (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX/(numEntries+1))) {
+ return false;
+ }
+
return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize)));
}
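The three checks above decompose the overflow test for (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE). A standalone sketch of the same guard, with the header size left as a parameter, makes the ordering explicit: each later expression is only evaluated once the earlier ones are known not to wrap.

#include <stdbool.h>
#include <stdint.h>

// Illustrative only, not part of the change: the same guard as a helper.
static bool capacityFitsUInt32(uint32_t numEntries, uint32_t entrySize, uint32_t headerSize)
{
    if (entrySize > UINT32_MAX - headerSize) {
        return false;                        // entrySize + headerSize would wrap
    }
    if (numEntries > UINT32_MAX - 1) {
        return false;                        // numEntries + 1 would wrap
    }
    // Both sums are now exact, so the division-based product check is safe.
    return (entrySize + headerSize) <= UINT32_MAX / (numEntries + 1);
}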
const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
IODataQueueEntry * entry;
+ // Check for overflow of entrySize
+ if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
+ return false;
+ }
+ // Check for underflow of (dataQueue->queueSize - tail)
+ if (dataQueue->queueSize < tail) {
+ return false;
+ }
+
if ( tail >= head )
{
// Is there enough room at the end for the entry?
- if ( (tail + entrySize) <= dataQueue->queueSize )
+ if ((entrySize <= UINT32_MAX - tail) &&
+ ((tail + entrySize) <= dataQueue->queueSize) )
{
entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
// The tail can be out of bounds when the size of the new entry
// exactly matches the available space at the end of the queue.
// The tail can range from 0 to dataQueue->queueSize inclusive.
-
- dataQueue->tail += entrySize;
+
+ OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
}
else if ( head > entrySize ) // Is there enough room at the beginning?
{
}
memcpy(&dataQueue->queue->data, data, dataSize);
- dataQueue->tail = entrySize;
+ OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
}
else
{
entry->size = dataSize;
memcpy(&entry->data, data, dataSize);
- dataQueue->tail += entrySize;
+ OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
}
else
{
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
+#ifdef enqueue
+#undef enqueue
+#endif
+
#ifdef dequeue
#undef dequeue
#endif
Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
IODataQueueAppendix * appendix;
+ vm_size_t allocSize;
if (!super::init()) {
return false;
}
- dataQueue = (IODataQueueMemory *)IOMallocAligned(round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE), PAGE_SIZE);
+ _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
+ if (!_reserved) {
+ return false;
+ }
+
+ if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
+ return false;
+ }
+
+ allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);
+
+ if (allocSize < size) {
+ return false;
+ }
+
+ dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
if (dataQueue == 0) {
return false;
}
dataQueue->head = 0;
dataQueue->tail = 0;
+ if (!setQueueSize(size)) {
+ return false;
+ }
+
appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
appendix->version = 0;
notifyMsg = &(appendix->msgh);
void IOSharedDataQueue::free()
{
if (dataQueue) {
- IOFreeAligned(dataQueue, round_page(dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
+ IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
dataQueue = NULL;
}
+ if (_reserved) {
+ IOFree (_reserved, sizeof(struct ExpansionData));
+ _reserved = NULL;
+ }
+
super::free();
}
IOMemoryDescriptor *descriptor = 0;
if (dataQueue != 0) {
- descriptor = IOMemoryDescriptor::withAddress(dataQueue, dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
+ descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
}
return descriptor;
IODataQueueEntry * head = 0;
UInt32 headSize = 0;
UInt32 headOffset = dataQueue->head;
- UInt32 queueSize = dataQueue->queueSize;
+ UInt32 queueSize = getQueueSize();
+ if (headOffset >= queueSize) {
+ return NULL;
+ }
+
head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
headSize = head->size;
// If there is room, check if there's enough room to hold the header and
// the data.
- if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
- ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize))
- {
+ if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+ (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
+ (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
+ (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
// No room for the header or the data, wrap to the beginning of the queue.
+ // Note: wrapping even with the UINT32_MAX checks, as we have to support
+ // queueSize of UINT32_MAX
entry = dataQueue->queue;
} else {
entry = head;
return entry;
}
+Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
+{
+ const UInt32 head = dataQueue->head; // volatile
+ const UInt32 tail = dataQueue->tail;
+ const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
+ IODataQueueEntry * entry;
+
+ // Check for overflow of entrySize
+ if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
+ return false;
+ }
+ // Check for underflow of (getQueueSize() - tail)
+ if (getQueueSize() < tail) {
+ return false;
+ }
+
+ if ( tail >= head )
+ {
+ // Is there enough room at the end for the entry?
+ if ((entrySize <= UINT32_MAX - tail) &&
+ ((tail + entrySize) <= getQueueSize()) )
+ {
+ entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
+
+ entry->size = dataSize;
+ memcpy(&entry->data, data, dataSize);
+
+ // The tail can be out of bounds when the size of the new entry
+ // exactly matches the available space at the end of the queue.
+ // The tail can range from 0 to dataQueue->queueSize inclusive.
+
+ OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
+ }
+ else if ( head > entrySize ) // Is there enough room at the beginning?
+ {
+ // Wrap around to the beginning, but do not allow the tail to catch
+ // up to the head.
+
+ dataQueue->queue->size = dataSize;
+
+ // We need to make sure that there is enough room to set the size before
+ // doing this. The user client checks for this and will look for the size
+ // at the beginning if there isn't room for it at the end.
+
+ if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
+ {
+ ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
+ }
+
+ memcpy(&dataQueue->queue->data, data, dataSize);
+ OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
+ }
+ else
+ {
+ return false; // queue is full
+ }
+ }
+ else
+ {
+ // Do not allow the tail to catch up to the head when the queue is full.
+ // That's why the comparison uses a '>' rather than '>='.
+
+ if ( (head - tail) > entrySize )
+ {
+ entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
+
+ entry->size = dataSize;
+ memcpy(&entry->data, data, dataSize);
+ OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
+ }
+ else
+ {
+ return false; // queue is full
+ }
+ }
+
+ // Send notification (via mach message) that data is available.
+
+ if ( ( head == tail ) /* queue was empty prior to enqueue() */
+ || ( dataQueue->head == tail ) ) /* queue was emptied during enqueue() */
+ {
+ sendDataAvailableNotification();
+ }
+
+ return true;
+}
+
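The sendDataAvailableNotification() above is what a user-space consumer blocks on. A consumer sketch, assuming the queue memory is exposed through the driver's user client (the connect handle and the zero memory/notification type constants are hypothetical); the IODataQueue* routines are the ordinary <IOKit/IODataQueueClient.h> API.

// Sketch of a user-space consumer; error handling omitted.
#include <IOKit/IOKitLib.h>
#include <IOKit/IODataQueueClient.h>
#include <mach/mach.h>

static void drain_queue(io_connect_t connect)
{
    mach_port_t        port = IODataQueueAllocateNotificationPort();
    mach_vm_address_t  addr = 0;
    mach_vm_size_t     size = 0;

    IOConnectSetNotificationPort(connect, 0 /* hypothetical notification type */, port, 0);
    IOConnectMapMemory64(connect, 0 /* hypothetical queue memory type */, mach_task_self(),
                         &addr, &size, kIOMapAnywhere);

    IODataQueueMemory *queue = (IODataQueueMemory *)addr;
    uint8_t  buf[4096];
    uint32_t len;

    for (;;) {
        while (IODataQueueDataAvailable(queue)) {
            len = sizeof(buf);
            if (IODataQueueDequeue(queue, buf, &len) != kIOReturnSuccess) {
                break;
            }
            /* consume len bytes from buf */
        }
        IODataQueueWaitForAvailableData(queue, port);   /* woken by sendDataAvailableNotification() */
    }
}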
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
Boolean retVal = TRUE;
IODataQueueEntry * head = 0;
UInt32 headSize = 0;
UInt32 headOffset = dataQueue->head;
- UInt32 queueSize = dataQueue->queueSize;
+ UInt32 queueSize = getQueueSize();
+ if (headOffset > queueSize) {
+ return false;
+ }
+
head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
headSize = head->size;
- // we wraped around to beginning, so read from there
- // either there was not even room for the header
- if ((headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
- // or there was room for the header, but not for the data
- ((headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE) > queueSize)) {
+ // we wrapped around to beginning, so read from there
+ // either there was not even room for the header
+ if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+ (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
+ // or there was room for the header, but not for the data
+ (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
+ (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
+ // Note: we have to wrap to the beginning even with the UINT32_MAX checks
+ // because we have to support a queueSize of UINT32_MAX.
entry = dataQueue->queue;
entrySize = entry->size;
+ if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+ (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
+ return false;
+ }
newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
- // else it is at the end
+ // else it is at the end
} else {
entry = head;
entrySize = entry->size;
+ if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
+ (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
+ (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
+ return false;
+ }
newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
}
}
if (dataSize) {
if (entrySize <= *dataSize) {
memcpy(data, &(entry->data), entrySize);
- dataQueue->head = newHeadOffset;
+ OSCompareAndSwap( dataQueue->head, newHeadOffset, (SInt32 *)&dataQueue->head);
} else {
retVal = FALSE;
}
retVal = FALSE;
}
} else {
- dataQueue->head = newHeadOffset;
+ OSCompareAndSwap( dataQueue->head, newHeadOffset, (SInt32 *)&dataQueue->head);
}
if (dataSize) {
return retVal;
}
+UInt32 IOSharedDataQueue::getQueueSize()
+{
+ if (!_reserved) {
+ return 0;
+ }
+ return _reserved->queueSize;
+}
+
+Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
+{
+ if (!_reserved) {
+ return false;
+ }
+ _reserved->queueSize = size;
+ return true;
+}
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
-static uint64_t acpi_wake_abstime;
+static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t deep_idle_rebase = TRUE;
#if CONFIG_SLEEP
/* let the realtime clock reset */
rtc_sleep_wakeup(acpi_sleep_abstime);
-
+ acpi_wake_postrebase_abstime = mach_absolute_time();
kdebug_enable = save_kdebug_enable;
if (kdebug_enable == 0) {
rtc_sleep_wakeup(acpi_idle_abstime);
kdebug_enable = save_kdebug_enable;
}
-
+ acpi_wake_postrebase_abstime = mach_absolute_time();
cpu_datap(master_cpu)->cpu_running = TRUE;
KERNEL_DEBUG_CONSTANT(
__asm__("wbinvd");
}
+boolean_t
+ml_recent_wake(void) {
+ uint64_t ctime = mach_absolute_time();
+ assert(ctime > acpi_wake_postrebase_abstime);
+ return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
+}
{
uint64_t abstime;
uint32_t mtxspin;
+#if DEVELOPMENT || DEBUG
uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
+#else
+ uint64_t default_timeout_ns = NSEC_PER_SEC>>1;
+#endif
uint32_t slto;
uint32_t prt;
}
boolean_t machine_timeout_suspended(void) {
- return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity());
+ return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake());
}
/* Eagerly evaluate all pending timer and thread callouts
int ml_timer_get_user_idle_level(void);
kern_return_t ml_timer_set_user_idle_level(int);
+boolean_t ml_recent_wake(void);
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _I386_MACHINE_ROUTINES_H_ */
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/pmap.h>
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap_internal.h>
#endif /* i386 */
#if CONFIG_MCA
}
}
+static void
+cpu_gdt_alias(vm_map_offset_t gdt, vm_map_offset_t alias)
+{
+ pt_entry_t *pte = NULL;
+
+ /* Require page alignment */
+ assert(page_aligned(gdt));
+ assert(page_aligned(alias));
+
+ pte = pmap_pte(kernel_pmap, alias);
+ pmap_store_pte(pte, kvtophys(gdt) | INTEL_PTE_REF
+ | INTEL_PTE_MOD
+ | INTEL_PTE_WIRED
+ | INTEL_PTE_VALID
+ | INTEL_PTE_WRITE
+ | INTEL_PTE_NX);
+
+ /* TLB flush unnecessary because the target processor isn't running yet */
+}
+
void
cpu_desc_init64(cpu_data_t *cdp)
master_ktss64.ist1 = (uintptr_t) low_eintstack
- sizeof(x86_64_intr_stack_frame_t);
- } else {
+ } else if (cdi->cdi_ktss == NULL) { /* Skipping re-init on wake */
cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
+
/*
* Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
* heap (cpu_desc_table).
* LDT descriptors are mapped into a separate area.
+ * GDT descriptors are addressed by alias to avoid sgdt leaks to user-space.
*/
cdi->cdi_idt.ptr = (void *)MASTER_IDT_ALIAS;
- cdi->cdi_gdt.ptr = (struct fake_descriptor *)cdt->gdt;
+ cdi->cdi_gdt.ptr = (void *)CPU_GDT_ALIAS(cdp->cpu_number);
cdi->cdi_ktss = (void *)&cdt->ktss;
cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
cdi->cdi_ldt = cdp->cpu_ldtp;
+ /* Make the virtual alias address for the GDT */
+ cpu_gdt_alias((vm_map_offset_t) &cdt->gdt,
+ (vm_map_offset_t) cdi->cdi_gdt.ptr);
+
/*
* Copy the tables
*/
{
cpu_desc_index_t *cdi = &cdp->cpu_desc_index;
+ /* Stuff the kernel per-cpu data area address into the MSRs */
+ wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
+ wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
+
+ /*
+ * Ensure the TSS segment's busy bit is clear. This is required
+ * for the case of reloading descriptors at wake to avoid
+ * their complete re-initialization.
+ */
+ gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY;
+
/* Load the GDT, LDT, IDT and TSS */
cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
lldt(KERNEL_LDT);
set_tr(KERNEL_TSS);
- /* Stuff the kernel per-cpu data area address into the MSRs */
- wrmsr64(MSR_IA32_GS_BASE, (uintptr_t) cdp);
- wrmsr64(MSR_IA32_KERNEL_GS_BASE, (uintptr_t) cdp);
-
#if GPROF // Hack to enable mcount to work on K64
__asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS)));
#endif
/*
* For KASLR, we alias the master processor's IDT and GDT at fixed
* virtual addresses to defeat SIDT/SGDT address leakage.
+ * Non-boot processors' GDT aliases are mapped likewise (skipping LOWGLOBAL_ALIAS).
+ * The low global vector page is mapped at a fixed alias also.
*/
#define MASTER_IDT_ALIAS (VM_MIN_KERNEL_ADDRESS + 0x0000)
#define MASTER_GDT_ALIAS (VM_MIN_KERNEL_ADDRESS + 0x1000)
-
-/*
- * The low global vector page is mapped at a fixed alias also.
- */
#define LOWGLOBAL_ALIAS (VM_MIN_KERNEL_ADDRESS + 0x2000)
+#define CPU_GDT_ALIAS(_cpu) (LOWGLOBAL_ALIAS + (0x1000*(_cpu)))
#endif /*__x86_64__ */
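With the definitions above, the alias area works out to one page per descriptor table. The layout below is illustrative only and assumes the boot processor is cpu 0 and keeps MASTER_GDT_ALIAS, so the cpu-0 slot of CPU_GDT_ALIAS is never handed out.

/*
 *   VM_MIN_KERNEL_ADDRESS + 0x0000   MASTER_IDT_ALIAS
 *   VM_MIN_KERNEL_ADDRESS + 0x1000   MASTER_GDT_ALIAS
 *   VM_MIN_KERNEL_ADDRESS + 0x2000   LOWGLOBAL_ALIAS   (== CPU_GDT_ALIAS(0), skipped)
 *   VM_MIN_KERNEL_ADDRESS + 0x3000   CPU_GDT_ALIAS(1)
 *   VM_MIN_KERNEL_ADDRESS + 0x4000   CPU_GDT_ALIAS(2), one page per additional CPU
 */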
#define TELEMETRY_MAX_BUFFER_SIZE (64*1024)
#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
+#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication
uint32_t telemetry_sample_rate = 0;
volatile boolean_t telemetry_needs_record = FALSE;
uuid_info_count = 0;
}
+ /*
+ * Don't copy in an unbounded amount of memory. The main binary and interesting
+ * non-shared-cache libraries should be in the first few images.
+ */
+ if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
+ uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
+ }
+
uint32_t uuid_info_size = (uint32_t)(task_has_64BitAddr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
char *uuid_info_array = NULL;
*/
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
if ((telemetry_buffer_size - telemetry_buffer_current_position) < sizeof(struct task_snapshot)) {
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
if ((telemetry_buffer_size - telemetry_buffer_current_position) < uuid_info_array_size) {
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
/* wrap and overwrite */
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
/* wrap and overwrite */
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
if ((telemetry_buffer_size - telemetry_buffer_current_position)/framesize < btcount) {
telemetry_buffer_end_point = current_record_start;
telemetry_buffer_current_position = 0;
+ if (current_record_start == 0) {
+ /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
+ goto cancel_sample;
+ }
goto copytobuffer;
}
notify = TRUE;
}
+cancel_sample:
+
TELEMETRY_UNLOCK();
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END, notify, telemetry_bytes_since_last_mark, telemetry_buffer_current_position, telemetry_buffer_end_point, 0);
*/
DPRINTF("%s: Removing mappings from 0->0x%lx\n", __FUNCTION__, vm_kernel_base);
- /* Remove all mappings past the descriptor aliases and low globals */
+ /*
+ * Remove all mappings past the boot-cpu descriptor aliases and low globals.
+ * Non-boot-cpu GDT aliases will be remapped later as needed.
+ */
pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base);
/*
#import <Foundation/Foundation.h>
-//#include "/Volumes/ws/10864999/Sundance10A273/Libc-849/BUILD_arm/Libc_Libc-849.roots/Libc_Libc-849~hdrDst/usr/local/include/libproc_internal.h"
#include <mach/message.h>
#include <libproc_internal.h>
-//#include "/Volumes/ws/10864999/Sundance10A273/Libc-849/BUILD_arm/Libc_Libc-849.roots/Libc_Libc-849~hdrDst/usr/local/include/libproc_internal.h"
-
#define MAX_THREADS 1000
char *pname;