/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define PAGE_SHIFT I386_PGSHIFT
#define PAGE_MASK (PAGE_SIZE - 1)
+#define PAGE_MAX_SHIFT PAGE_SHIFT
+#define PAGE_MAX_SIZE PAGE_SIZE
+#define PAGE_MAX_MASK PAGE_MASK
+
+#define PAGE_MIN_SHIFT PAGE_SHIFT
+#define PAGE_MIN_SIZE PAGE_SIZE
+#define PAGE_MIN_MASK PAGE_MASK
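/*
 * Illustrative sketch (not part of this header): with the single 4 KB page
 * size above, PAGE_MASK supports the usual round/truncate idioms.  The
 * names `len`, `rounded` and `truncated` are hypothetical.
 *
 *	vm_offset_t len       = 5000;
 *	vm_offset_t rounded   = (len + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK);	// 8192
 *	vm_offset_t truncated = len & ~((vm_offset_t)PAGE_MASK);		// 4096
 */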
+
#define I386_LPGBYTES 2*1024*1024 /* bytes per large page */
#define I386_LPGSHIFT 21 /* bitshift for large pages */
#define I386_LPGMASK (I386_LPGBYTES-1)
#define i386_btop(x) ((ppnum_t)((x) >> I386_PGSHIFT))
#define machine_btop(x) i386_btop(x)
#define i386_ptob(x) (((pmap_paddr_t)(x)) << I386_PGSHIFT)
+#define machine_ptob(x) i386_ptob(x)
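/*
 * Illustrative sketch (not part of this header): converting between a
 * physical byte address and a physical page number with the macros above.
 * The variables `pa`, `pn` and `base` are hypothetical.
 *
 *	pmap_paddr_t pa   = 0x12345678;
 *	ppnum_t      pn   = i386_btop(pa);	// 0x12345 (drops the low 12 bits)
 *	pmap_paddr_t base = i386_ptob(pn);	// 0x12345000, start of that page
 */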
/*
* Round off or truncate to the nearest page. These will work
/*
* default top of user stack... it grows down from here
*/
-#define VM_USRSTACK64 ((user_addr_t) 0x00007FFF5FC00000ULL)
+#define VM_USRSTACK64 ((user_addr_t) 0x00007FFEEFC00000ULL)
+
+/*
+ * XXX TODO: Obsolete?
+ */
#define VM_DYLD64 ((user_addr_t) 0x00007FFF5FC00000ULL)
#define VM_LIB64_SHR_DATA ((user_addr_t) 0x00007FFF60000000ULL)
#define VM_LIB64_SHR_TEXT ((user_addr_t) 0x00007FFF80000000ULL)
/* process-relative values (all 32-bit legacy only for now) */
#define VM_MIN_ADDRESS ((vm_offset_t) 0)
-#define VM_USRSTACK32 ((vm_offset_t) 0xC0000000)
+#define VM_USRSTACK32 ((vm_offset_t) 0xC0000000) /* ASLR slides stack down by up to 1 MB */
#define VM_MAX_ADDRESS ((vm_offset_t) 0xFFE00000)
#ifdef KERNEL_PRIVATE
+#define TEST_PAGE_SIZE_16K FALSE
+#define TEST_PAGE_SIZE_4K TRUE
+
/* Kernel-wide values */
#define KB (1024ULL)
* Maximum physical memory supported.
*/
#define K32_MAXMEM (32*GB)
-#define K64_MAXMEM (96*GB)
-#if defined(__i386__)
-#define KERNEL_MAXMEM K32_MAXMEM
-#else
+#define K64_MAXMEM (252*GB)
#define KERNEL_MAXMEM K64_MAXMEM
-#endif
/*
* XXX
* We can't let VM allocate memory from there.
*/
-#if defined(__i386__)
-
-#define KERNEL_IMAGE_TO_PHYS(x) (x)
-#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00001000U)
-#define VM_MIN_KERNEL_AND_KEXT_ADDRESS VM_MIN_KERNEL_ADDRESS
-#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xFE7FFFFFU)
-
-#elif defined(__x86_64__)
#define KERNEL_IMAGE_TO_PHYS(x) (x)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL)
#define KEXT_ALLOC_BASE(x) ((x) - KEXT_ALLOC_MAX_OFFSET)
#define KEXT_ALLOC_SIZE(x) (KEXT_ALLOC_MAX_OFFSET - (x))
-#else
-#error unsupported architecture
-#endif
-
-#define KERNEL_STACK_SIZE (I386_PGBYTES*4)
+#define VM_KERNEL_ADDRESS(va) ((((vm_address_t)(va))>=VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \
+ (((vm_address_t)(va))<=VM_MAX_KERNEL_ADDRESS))
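/*
 * Illustrative sketch (not part of this header): VM_KERNEL_ADDRESS() is a
 * plain range check, e.g. for deciding whether an address belongs to the
 * kernel/kext region.  `addr` is hypothetical.
 *
 *	vm_address_t addr = ...;
 *	if (VM_KERNEL_ADDRESS(addr)) {
 *		// addr lies within [VM_MIN_KERNEL_AND_KEXT_ADDRESS,
 *		//                   VM_MAX_KERNEL_ADDRESS]
 *	}
 */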
#define VM_MAP_MIN_ADDRESS MACH_VM_MIN_ADDRESS
#define VM_MAP_MAX_ADDRESS MACH_VM_MAX_ADDRESS
/* FIXME - always leave like this? */
-#define INTSTACK_SIZE (I386_PGBYTES*4)
+#if KASAN
+/* Increase the stack sizes to account for the redzones that get added to every
+ * stack object. */
+# define INTSTACK_SIZE (I386_PGBYTES*4*4)
+# define KERNEL_STACK_SIZE (I386_PGBYTES*4*4)
+#else
+# define INTSTACK_SIZE (I386_PGBYTES*4)
+# define KERNEL_STACK_SIZE (I386_PGBYTES*4)
+#endif
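/*
 * With the usual 4 KB I386_PGBYTES this works out to 16 KB interrupt and
 * kernel stacks normally, and 64 KB each under KASAN to make room for the
 * redzones described above.
 */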
#ifdef MACH_KERNEL_PRIVATE
#define VM32_MIN_ADDRESS ((vm32_offset_t) 0)
#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF))
-#if defined(__i386__)
-
-#define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0x00000000)
-
-#define VM_MIN_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0x00000000U)
-#define VM_MAX_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0x1FFFFFFFU)
+/*
+ * kalloc() parameters:
+ *
+ * Historically kalloc's underlying zones were power-of-2 sizes, with a
+ * KALLOC_MINSIZE of 16 bytes. The allocator ensured that
+ * (sizeof == alignof) >= 16 for all kalloc allocations.
+ *
+ * Today kalloc may use zones with intermediate sizes, constrained by
+ * KALLOC_MINSIZE and a minimum alignment, expressed by KALLOC_LOG2_MINALIGN.
+ *
+ * The common alignment for LP64 is that of longs and pointers, i.e. 8 bytes.
+ */
-#define NCOPY_WINDOWS 4
-#elif defined(__x86_64__)
+#define KALLOC_MINSIZE 16 /* minimum allocation size */
+#define KALLOC_LOG2_MINALIGN 4 /* log2 minimum alignment */
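/*
 * Illustrative sketch (not part of this header): under these parameters no
 * kalloc request is served from a zone smaller than KALLOC_MINSIZE, and
 * every allocation is aligned to at least (1 << KALLOC_LOG2_MINALIGN)
 * bytes.  The helper below is hypothetical and only shows the size floor.
 *
 *	static vm_size_t
 *	kalloc_size_floor(vm_size_t req)
 *	{
 *		return (req < KALLOC_MINSIZE) ? KALLOC_MINSIZE : req;
 *	}
 *
 *	// kalloc_size_floor(5) == 16; memory returned for such a request is
 *	// at least 16-byte aligned, which also satisfies the 8-byte
 *	// long/pointer alignment LP64 commonly needs.
 */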
#define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0x00000000)
#define NCOPY_WINDOWS 0
-#else
-#error unsupported architecture
-#endif
+
/*
* Conversion between 80386 pages and VM pages
#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p))))
#define vm_to_i386(p) (i386_btop(ptoa(p)))
-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
- MACRO_BEGIN \
- pmap_t __pmap = (pmap); \
- vm_page_t __page = (page); \
- vm_prot_t __prot__ = (protection); \
- \
- if (__pmap == kernel_pmap) { \
- __prot__ |= VM_PROT_WRITE; \
- } else { \
- assert(!__page->encrypted); \
- } \
- \
- pmap_enter( \
- __pmap, \
- (virtual_address), \
- __page->phys_page, \
- __prot__, \
- flags, \
- (wired) \
- ); \
- MACRO_END
-#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
- flags, wired, options, result) \
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
+ MACRO_BEGIN \
+ pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), (cache_attr)); \
+ (object)->set_cache_attr = TRUE; \
+ (void) batch_pmap_op; \
+ MACRO_END
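/*
 * Illustrative sketch (not part of this header): a caller wanting a page
 * mapped with a particular caching policy applies the attribute to the
 * physical page and marks the owning object.  `mem`, `object` and the
 * VM_WIMG_IO attribute are placeholders for whatever the caller uses.
 *
 *	PMAP_SET_CACHE_ATTR(mem, object, VM_WIMG_IO, FALSE);
 *	// expands to pmap_set_cache_attributes() on the page's physical page
 *	// number plus (object)->set_cache_attr = TRUE; the batch flag is
 *	// ignored on x86.
 */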
+
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op)\
MACRO_BEGIN \
- result=KERN_SUCCESS; \
- PMAP_ENTER(pmap, virtual_address, page, protection, \
- flags, wired); \
+ (void) user_page_list; \
+ (void) num_pages; \
+ (void) batch_pmap_op; \
MACRO_END
#define IS_USERADDR64_CANONICAL(addr) \