int options = 0; // KMA_LOMEM;
kr = kernel_memory_allocate(kernel_map, &vmaddr,
- page_size, 0, options);
+ page_size, 0, options, VM_KERN_MEMORY_IOKIT);
if (KERN_SUCCESS != kr) vmaddr = 0;
else bzero((void *) vmaddr, page_size);
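The only functional change in this hunk is the trailing VM_KERN_MEMORY_IOKIT argument: newer xnu threads a vm_tag_t through kernel_memory_allocate() so wired kernel allocations are attributed to their owning subsystem for accounting. The allocate, check, and zero logic itself is untouched.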
_alignment = alignment;
+ if ((capacity + alignment) < _capacity) return (false);
+
if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
return false;
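capacity and alignment are unsigned, so their sum can wrap around zero and slip past any later size validation; the added guard detects the wrap by checking whether the sum came out smaller than the saved _capacity. A minimal userspace sketch of the same idiom (function and variable names are illustrative, not from the patch):

#include <stdbool.h>
#include <stddef.h>

/* Sketch of the overflow guard: for unsigned types, wraparound is
 * well-defined, so if a + b wrapped past SIZE_MAX the sum is
 * necessarily smaller than either operand. */
static bool size_add_ok(size_t a, size_t b, size_t *sum_out)
{
    size_t sum = a + b;
    if (sum < a)            /* wrapped: reject, as the patch does */
        return false;
    *sum_out = sum;
    return true;
}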
{
IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
- debug_iomalloc_size += capacity;
+ OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
}
}
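The statistics counters were previously bumped with a plain +=, a read-modify-write that races when several threads allocate concurrently and silently loses updates; OSAddAtomic makes the add indivisible. A userspace sketch of the same fix using C11 atomics (counter and function names are illustrative):

#include <stdatomic.h>

static _Atomic long debug_bytes;    /* shared statistics counter */

/* Racy version (what the old code did):
 *     debug_bytes += capacity;
 * Two threads can load the same old value and one increment is lost.
 * The atomic fetch-add below performs the read-modify-write as one
 * indivisible operation, like OSAddAtomic in the patch. */
static void account_alloc(long capacity)
{
    atomic_fetch_add_explicit(&debug_bytes, capacity,
                              memory_order_relaxed);
}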
if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
- debug_iomallocpageable_size += size;
+ OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
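Two variants appear because the counters differ in width: OSAddAtomic operates on a 32-bit SInt32, while OSAddAtomicLong takes a long, matching the long-typed debug_iomallocpageable_size.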
mapTask = inTask;
if (NULL == inTask)
if (options & kIOMemoryPageable)
{
#if IOALLOCDEBUG
- debug_iomallocpageable_size -= round_page(size);
+ OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
}
else if (buffer)
kmem_free(kernel_map, page, page_size);
}
#if IOALLOCDEBUG
- debug_iomalloc_size -= size;
+ OSAddAtomic(-size, &debug_iomalloc_size);
#endif
IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
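The free paths mirror the allocation paths: the subtraction is expressed as an atomic add of a negated operand, so OSAddAtomic(-size, &debug_iomalloc_size) is the atomic equivalent of the old -=, and round_page(size) is negated the same way for the pageable counter.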
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
assert(length <= _capacity);
+ if (length > _capacity) return;
_length = length;
_ranges.v64->length = length;
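assert() is compiled out of release kernel builds, so on its own it only documents the invariant; the added comparison enforces it at runtime, preventing a caller from growing _length, and with it the descriptor's apparent size, past the underlying allocation. A sketch of the pattern (names are illustrative; with NDEBUG the assert vanishes and the explicit check is the only guard that ships):

#include <assert.h>
#include <stddef.h>

/* Sketch: in release builds the assert expands to nothing, so the
 * explicit comparison below is the only check that actually runs. */
static void set_length_checked(size_t length, size_t capacity,
                               size_t *length_out)
{
    assert(length <= capacity);  /* debug builds: loud failure */
    if (length > capacity)       /* all builds: refuse to overrun */
        return;
    *length_out = length;
}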
}

void * IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
IOVirtualAddress address;
+
+ if ((start + withLength) < start) return 0;
+
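This closes the same wraparound hole as in initWithPhysicalMask: without it, a huge withLength can wrap start + withLength back below _length, so any downstream (start + withLength) <= _length style bounds test is satisfied and the caller gets a pointer covering far more than the buffer. Illustrative 32-bit arithmetic: start = 0x1000 plus withLength = 0xFFFFF000 wraps to 0, which passes any upper-bound check.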
if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
address = (IOVirtualAddress) _buffer;
else