- if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
- panic("stack_alloc: kernel_memory_allocate");
+
+ /*
+ * Request guard pages on either side of the stack. Ask
+ * kernel_memory_allocate() for two extra pages to account
+ * for these.
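+ * The guard pages are left unmapped, so a stack overflow or
+ * underflow faults immediately instead of silently corrupting
+ * adjacent memory.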
+ */
+
+ flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT;
+ kr = kernel_memory_allocate(kernel_map, &stack,
+ kernel_stack_size + (2*PAGE_SIZE),
+ stack_addr_mask,
+ flags,
+ VM_KERN_MEMORY_STACK);
+ if (kr != KERN_SUCCESS) {
+ panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2*PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
+ }
+
+ /*
+ * The stack address that comes back is the address of the lower
+ * guard page. Skip past it to get the actual stack base address.
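+ *
+ *   stack          stack + PAGE_SIZE
+ *   v              v
+ *   +--------------+-----------------------+--------------+
+ *   |  guard page  |  usable kernel stack  |  guard page  |
+ *   +--------------+-----------------------+--------------+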
+ */
+
+ stack += PAGE_SIZE;
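
Whatever later frees this stack has to mirror the arithmetic above: step back
over the lower guard page and release the two extra pages together with the
stack itself. A minimal sketch of that inverse calculation, assuming the stack
is ultimately released with kmem_free() (the free path is not part of this
diff):

	/* 'stack' points at the stack base, one page past the start of the allocation */
	kmem_free(kernel_map, stack - PAGE_SIZE, kernel_stack_size + (2*PAGE_SIZE));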