#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
+#include <kern/ledger.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <mach_debug.h>
+#include <san/kasan.h>
/*
* We allocate stacks from generic kernel VM.
static unsigned int stack_free_count, stack_free_hiwat; /* free list count and its high-water mark */
static unsigned int stack_hiwat;
unsigned int stack_total; /* current total count */
+unsigned long long stack_allocs; /* total count of allocations */
+
+static int stack_fake_zone_index = -1; /* index in zone_info array */
static unsigned int stack_free_target;
static int stack_free_delta;
static vm_offset_t stack_addr_mask;
-unsigned int kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
-vm_offset_t kernel_stack_size = KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_mask = -KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_depth_max = 0;
+unsigned int kernel_stack_pages;
+vm_offset_t kernel_stack_size;
+vm_offset_t kernel_stack_mask;
+vm_offset_t kernel_stack_depth_max;
/*
* The next field is at the base of the stack,
return 1UL << (log2(size - 1) + 1);
}
+static vm_offset_t stack_alloc_internal(void);
+static void stack_free_stack(vm_offset_t);
+
void
stack_init(void)
{
simple_lock_init(&stack_lock_data, 0);
+ kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+ kernel_stack_size = KERNEL_STACK_SIZE;
+ kernel_stack_mask = -KERNEL_STACK_SIZE;
+ kernel_stack_depth_max = 0;
+
if (PE_parse_boot_argn("kernel_stack_pages",
&kernel_stack_pages,
sizeof (kernel_stack_pages))) {
 * Allocate a stack for a thread;
 * may block.
*/
-void
-stack_alloc(
- thread_t thread)
+
+static vm_offset_t
+stack_alloc_internal(void)
{
- vm_offset_t stack;
+ vm_offset_t stack = 0;
spl_t s;
- int guard_flags;
-
- assert(thread->kernel_stack == 0);
+ int flags = 0;
+ kern_return_t kr = KERN_SUCCESS;
s = splsched();
stack_lock();
+ stack_allocs++;
stack = stack_free_list;
if (stack != 0) {
stack_free_list = stack_next(stack);
* for these.
*/
- guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
- if (kernel_memory_allocate(kernel_map, &stack,
+ flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT;
+ kr = kernel_memory_allocate(kernel_map, &stack,
kernel_stack_size + (2*PAGE_SIZE),
stack_addr_mask,
- KMA_KOBJECT | guard_flags)
- != KERN_SUCCESS)
- panic("stack_alloc: kernel_memory_allocate");
+ flags,
+ VM_KERN_MEMORY_STACK);
+ if (kr != KERN_SUCCESS) {
+ panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2*PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
+ }
/*
* The stack address that comes back is the address of the lower
stack += PAGE_SIZE;
}
+ return stack;
+}
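/*
 * A minimal sketch of the resulting layout, assuming the usual
 * KMA_GUARD_FIRST/KMA_GUARD_LAST behavior (illustrative, not part of the
 * change itself):
 *
 *   returned addr:  [ guard page ][ kernel_stack_size of stack ][ guard page ]
 *                                 ^
 *                                 stack after the += PAGE_SIZE adjustment,
 *                                 i.e. what stack_alloc_internal() hands back
 */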
+
+void
+stack_alloc(
+ thread_t thread)
+{
- machine_stack_attach(thread, stack);
+ assert(thread->kernel_stack == 0);
+ machine_stack_attach(thread, stack_alloc_internal());
+}
+
+void
+stack_handoff(thread_t from, thread_t to)
+{
+ assert(from == current_thread());
+ machine_stack_handoff(from, to);
}
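/*
 * Descriptive note: stack_handoff() is a thin wrapper; machine_stack_handoff()
 * passes the running kernel stack from `from` to `to`, so a handoff-style
 * context switch can reuse the current stack rather than going through a
 * stack_free()/stack_alloc() pair. The machine-dependent details live in
 * machine_stack_handoff().
 */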
/*
{
vm_offset_t stack = machine_stack_detach(thread);
+#if KASAN
+ kasan_unpoison_stack(stack, kernel_stack_size);
+ kasan_unpoison_fakestack(thread);
+#endif
+
assert(stack);
- if (stack != thread->reserved_stack)
+ if (stack != thread->reserved_stack) {
stack_free_stack(stack);
+ }
}
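/*
 * KASAN note (descriptive, under the usual KASAN semantics): the detached
 * stack goes back on the free list, so its shadow is cleared here and any
 * fake-stack allocations still attributed to the thread are released;
 * otherwise the next thread to pick up this stack would inherit stale poison.
 */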
void
+stack_free_reserved(
+ thread_t thread)
+{
+ if (thread->reserved_stack != thread->kernel_stack) {
+#if KASAN
+ kasan_unpoison_stack(thread->reserved_stack, kernel_stack_size);
+#endif
+ stack_free_stack(thread->reserved_stack);
+ }
+}
+
+static void
stack_free_stack(
vm_offset_t stack)
{
* back in stack_alloc().
*/
- stack = (vm_offset_t)vm_map_trunc_page(stack);
+ stack = (vm_offset_t)vm_map_trunc_page(
+ stack,
+ VM_MAP_PAGE_MASK(kernel_map));
stack -= PAGE_SIZE;
if (vm_map_remove(
kernel_map,
}
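/*
 * Note on the API change above: the two-argument vm_map_trunc_page() form
 * truncates with the page mask of kernel_map instead of assuming the global
 * PAGE_MASK, which keeps the rounding correct for maps whose page size can
 * differ from the kernel's (for kernel_map the two coincide).
 */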
void
-stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
- vm_size_t *alloc_size, int *collectable, int *exhaustable)
+stack_fake_zone_init(int zone_index)
+{
+ stack_fake_zone_index = zone_index;
+}
+
+void
+stack_fake_zone_info(int *count,
+ vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
+ uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
unsigned int total, hiwat, free;
+ unsigned long long all;
spl_t s;
s = splsched();
stack_lock();
+ all = stack_allocs;
total = stack_total;
hiwat = stack_hiwat;
free = stack_free_count;
*max_size = kernel_stack_size * hiwat;
*elem_size = kernel_stack_size;
*alloc_size = kernel_stack_size;
+ *sum_size = all * kernel_stack_size;
+
*collectable = 1;
*exhaustable = 0;
+ *caller_acct = 1;
}
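/*
 * Descriptive summary: these counters feed the "fake zone" reporting path,
 * so kernel stacks show up alongside real zalloc zones even though they are
 * allocated straight from kernel VM. cur_size/max_size track current and
 * peak usage, and the new sum_size is the cumulative total ever allocated,
 * i.e. stack_allocs * kernel_stack_size bytes, following the existing
 * fake-zone field conventions.
 */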
/* OBSOLETE */
vm_size_t maxusage;
vm_offset_t maxstack;
- register thread_t *thread_list;
- register thread_t thread;
+ thread_t *thread_list;
+ thread_t thread;
unsigned int actual; /* number of threads captured in the list */
unsigned int i;
/* OK, have memory and list is locked */
thread_list = (thread_t *) addr;
- for (i = 0, thread = (thread_t) queue_first(&threads);
+ for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
!queue_end(&threads, (queue_entry_t) thread);
- thread = (thread_t) queue_next(&thread->threads)) {
+ thread = (thread_t)(void *) queue_next(&thread->threads)) {
thread_reference_internal(thread);
thread_list[i++] = thread;
}