#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
+#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>
#include <IOKit/assert.h>
#include <sys/sysctl.h>
#endif
+#include "libkern/OSAtomic.h"
+#include <libkern/c++/OSKext.h>
+#include <IOKit/IOStatisticsPrivate.h>
+#include <sys/msgbuf.h>
+
+#if IOKITSTATS
+
+#define IOStatisticsAlloc(type, size) \
+do { \
+ IOStatistics::countAlloc(type, size); \
+} while (0)
+
+#else
+
+#define IOStatisticsAlloc(type, size)
+
+#endif /* IOKITSTATS */
+
extern "C"
{
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
-extern kern_return_t kmem_suballoc(
- vm_map_t parent,
- vm_offset_t *addr,
- vm_size_t size,
- boolean_t pageable,
- boolean_t anywhere,
- vm_map_t *new_map);
+extern int
+__doprnt(
+ const char *fmt,
+ va_list argp,
+ void (*putc)(int, void *),
+ void *arg,
+ int radix);
+
+extern void cons_putc_locked(char);
+extern void bsd_log_lock(void);
+extern void bsd_log_unlock(void);
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
enum { kIOPageableMapSize = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
-/* LP64todo - these need to expand */
typedef struct {
- vm_map_t map;
+ vm_map_t map;
vm_offset_t address;
vm_offset_t end;
} IOMapData;
void * address;
address = (void *)kalloc(size);
+ if ( address ) {
#if IOALLOCDEBUG
- if (address) {
debug_iomalloc_size += size;
- }
#endif
+ IOStatisticsAlloc(kIOStatisticsMalloc, size);
+ }
+
return address;
}
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
+ IOStatisticsAlloc(kIOStatisticsFree, size);
}
}
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
kern_return_t kr;
- vm_address_t address;
- vm_address_t allocationAddress;
+ vm_offset_t address;
+ vm_offset_t allocationAddress;
vm_size_t adjustedSize;
- vm_offset_t alignMask;
+ uintptr_t alignMask;
if (size == 0)
return 0;
alignMask = alignment - 1;
adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
- if (adjustedSize >= page_size) {
+ if (size > adjustedSize) {
+ address = 0; /* overflow detected */
+ }
+ else if (adjustedSize >= page_size) {
kr = kernel_memory_allocate(kernel_map, &address,
size, alignMask, 0);
+ (sizeof(vm_size_t) + sizeof(vm_address_t)))
& (~alignMask);
- *((vm_size_t *)(address - sizeof(vm_size_t)
- - sizeof(vm_address_t))) = adjustedSize;
+ *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
+ = adjustedSize;
*((vm_address_t *)(address - sizeof(vm_address_t)))
= allocationAddress;
} else
assert(0 == (address & alignMask));
-#if IOALLOCDEBUG
if( address) {
+#if IOALLOCDEBUG
debug_iomalloc_size += size;
- }
#endif
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
+ }
return (void *) address;
}
void IOFreeAligned(void * address, vm_size_t size)
{
vm_address_t allocationAddress;
- vm_size_t adjustedSize;
+ vm_size_t adjustedSize;
if( !address)
return;
adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
if (adjustedSize >= page_size) {
- kmem_free( kernel_map, (vm_address_t) address, size);
+ kmem_free( kernel_map, (vm_offset_t) address, size);
} else {
- adjustedSize = *((vm_size_t *)( (vm_address_t) address
+ adjustedSize = *((vm_size_t *)( (vm_address_t) address
- sizeof(vm_address_t) - sizeof(vm_size_t)));
allocationAddress = *((vm_address_t *)( (vm_address_t) address
- sizeof(vm_address_t) ));
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
+
+ IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
-IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
+IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
mach_vm_address_t allocationAddress;
mach_vm_size_t adjustedSize;
adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
if (adjustedSize >= page_size) {
- kmem_free( kernel_map, (vm_address_t) address, size);
+ kmem_free( kernel_map, (vm_offset_t) address, size);
} else {
kfree((void *)allocationAddress, adjustedSize);
}
+ IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
}
mach_vm_address_t
-IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
+IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
+ mach_vm_size_t alignment, bool contiguous)
{
kern_return_t kr;
mach_vm_address_t address;
alignMask = alignment - 1;
adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
- if (adjustedSize >= page_size)
+ contiguous = (contiguous && (adjustedSize > page_size))
+ || (alignment > page_size);
+
+ if (contiguous || maxPhys)
{
+ int options = 0;
vm_offset_t virt;
+
adjustedSize = size;
- if (adjustedSize > page_size)
+ contiguous = (contiguous && (adjustedSize > page_size))
+ || (alignment > page_size);
+
+ if (!contiguous)
+ {
+ if (maxPhys <= 0xFFFFFFFF)
+ {
+ maxPhys = 0;
+ options |= KMA_LOMEM;
+ }
+ else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
+ {
+ maxPhys = 0;
+ }
+ }
+ if (contiguous || maxPhys)
{
kr = kmem_alloc_contig(kernel_map, &virt, size,
- alignMask, 0, 0);
+ alignMask, atop(maxPhys), atop(alignMask), 0);
}
else
{
kr = kernel_memory_allocate(kernel_map, &virt,
- size, alignMask, 0);
+ size, alignMask, options);
}
if (KERN_SUCCESS == kr)
address = virt;
& (~alignMask);
if (atop_32(address) != atop_32(address + size - 1))
- address = round_page_32(address);
+ address = round_page(address);
*((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
- sizeof(mach_vm_address_t))) = adjustedSize;
address = 0;
}
-#if IOALLOCDEBUG
if (address) {
- debug_iomalloc_size += size;
- }
+ IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
+#if IOALLOCDEBUG
+ debug_iomalloc_size += size;
#endif
+ }
return (address);
}
+
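/*
 * Illustrative sketch, not part of this change: a hypothetical in-kernel
 * caller (example_alloc_low_page is made up for illustration) of the new
 * IOKernelAllocateWithPhysicalRestrict(), requesting one page that must sit
 * below 4GB physically but need not be contiguous. With maxPhys <= 0xFFFFFFFF
 * and contiguous == false, the code above takes the KMA_LOMEM
 * kernel_memory_allocate() path.
 */
static void
example_alloc_low_page(void)
{
    mach_vm_address_t addr;

    addr = IOKernelAllocateWithPhysicalRestrict(page_size,      /* size */
                                                0xFFFFFFFFULL,   /* maxPhys: highest acceptable physical address */
                                                page_size,       /* alignment */
                                                false);          /* contiguous not required */
    if (addr) {
        /* ... use the buffer ... */
        IOKernelFreePhysical(addr, page_size);
    }
}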
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
/* Do we want a physical address? */
if (!physicalAddress)
{
- address = IOKernelAllocateContiguous(size, alignment);
+ address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
}
else do
{
IOBufferMemoryDescriptor * bmd;
mach_vm_address_t physicalMask;
- vm_offset_t alignMask;
+ vm_offset_t alignMask;
alignMask = alignment - 1;
- physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
+ physicalMask = (0xFFFFFFFF ^ alignMask);
+
bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
if (!bmd)
}
else
{
- IOKernelFreeContiguous((mach_vm_address_t) address, size);
+ IOKernelFreePhysical((mach_vm_address_t) address, size);
}
}
struct IOMallocPageableRef
{
- vm_address_t address;
+ vm_offset_t address;
vm_size_t size;
};
if( kIOReturnSuccess != kr)
ref.address = 0;
+ if( ref.address) {
#if IOALLOCDEBUG
- if( ref.address)
- debug_iomallocpageable_size += round_page_32(size);
+ debug_iomallocpageable_size += round_page(size);
#endif
+ IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
+ }
return( (void *) ref.address );
}
-vm_map_t IOPageableMapForAddress( vm_address_t address )
+vm_map_t IOPageableMapForAddress( uintptr_t address )
{
vm_map_t map = 0;
UInt32 index;
}
}
if( !map)
- IOPanic("IOPageableMapForAddress: null");
+ panic("IOPageableMapForAddress: null");
return( map );
}
kmem_free( map, (vm_offset_t) address, size);
#if IOALLOCDEBUG
- debug_iomallocpageable_size -= round_page_32(size);
+ debug_iomallocpageable_size -= round_page(size);
#endif
-}
+ IOStatisticsAlloc(kIOStatisticsFreePageable, size);
+}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
if( task != kernel_task)
return( kIOReturnUnsupported );
-
- length = round_page_32(address + length) - trunc_page_32( address );
- address = trunc_page_32( address );
+ if ((address | length) & PAGE_MASK)
+ {
+// OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
+ return( kIOReturnUnsupported );
+ }
+ length = round_page(address + length) - trunc_page( address );
+ address = trunc_page( address );
// make map mode
cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-SInt32 OSKernelStackRemaining( void )
+vm_offset_t OSKernelStackRemaining( void )
{
- SInt32 stack;
-
- stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));
-
- return( stack );
+ return (ml_stack_remaining());
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+static void _iolog_consputc(int ch, void *arg __unused)
+{
+ cons_putc_locked(ch);
+}
+
+static void _iolog_logputc(int ch, void *arg __unused)
+{
+ log_putc_locked(ch);
+}
+
void IOLog(const char *format, ...)
{
- va_list ap;
- extern void conslog_putc(char);
- extern void logwakeup(void);
+ va_list ap;
- va_start(ap, format);
- _doprnt(format, &ap, conslog_putc, 16);
- va_end(ap);
+ va_start(ap, format);
+ IOLogv(format, ap);
+ va_end(ap);
}
+void IOLogv(const char *format, va_list ap)
+{
+ va_list ap2;
+
+ va_copy(ap2, ap);
+
+ bsd_log_lock();
+ __doprnt(format, ap, _iolog_logputc, NULL, 16);
+ bsd_log_unlock();
+
+ __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+}
+
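/*
 * Illustrative sketch, not part of this change: a hypothetical driver-side
 * variadic wrapper (ExampleDriverLog is made up for illustration) built on
 * the new IOLogv(). IOLogv() copies the va_list internally via va_copy(),
 * formatting once into the message buffer and once to the console, so the
 * caller only needs a single va_start()/va_end() pair.
 */
static void
ExampleDriverLog(const char * fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    IOLogv(fmt, args);
    va_end(args);
}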
+#if !__LP64__
void IOPanic(const char *reason)
{
panic("%s", reason);
}
+#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */