diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c
index a5912256932b7222172b63e616ae5dfd9f24a259..18db3f24b1cb84c8b548accbe34f2b392cabdf24 100644
--- a/osfmk/kern/stack.c
+++ b/osfmk/kern/stack.c
 #include <kern/thread.h>
 #include <kern/zalloc.h>
 #include <kern/kalloc.h>
+#include <kern/ledger.h>
 
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
 
 #include <mach_debug.h>
+#include <san/kasan.h>
 
 /*
  *     We allocate stacks from generic kernel VM.
@@ -63,6 +65,9 @@ static vm_offset_t            stack_free_list;
 static unsigned int            stack_free_count, stack_free_hiwat;             /* free list count */
 static unsigned int            stack_hiwat;
 unsigned int                   stack_total;                            /* current total count */
+unsigned long long             stack_allocs;                           /* total count of allocations */
+
+static int                     stack_fake_zone_index = -1;     /* index in zone_info array */
 
 static unsigned int            stack_free_target;
 static int                             stack_free_delta;
@@ -71,10 +76,10 @@ static unsigned int         stack_new_count;                                                /* total new stack allocations */
 
 static vm_offset_t             stack_addr_mask;
 
-unsigned int                   kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
-vm_offset_t                    kernel_stack_size = KERNEL_STACK_SIZE;
-vm_offset_t                    kernel_stack_mask = -KERNEL_STACK_SIZE;
-vm_offset_t                    kernel_stack_depth_max = 0;
+unsigned int                   kernel_stack_pages;
+vm_offset_t                    kernel_stack_size;
+vm_offset_t                    kernel_stack_mask;
+vm_offset_t                    kernel_stack_depth_max;
 
 /*
  *     The next field is at the base of the stack,
@@ -98,11 +103,19 @@ roundup_pow2(vm_offset_t size)
        return 1UL << (log2(size - 1) + 1); 
 }
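
Since kernel_memory_allocate() takes a power-of-two address mask, roundup_pow2() pins the stack size to the next power of two. A standalone sketch of the same rounding, with a portable shift loop standing in for whatever bit-scan the kernel's log2() uses (values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* floor(log2(size)); size must be nonzero */
    static unsigned
    log2_floor(uintptr_t size)
    {
        unsigned bit = 0;
        while (size >>= 1)
            bit++;
        return bit;
    }

    /* smallest power of two >= size; size must be >= 2 */
    static uintptr_t
    roundup_pow2_sketch(uintptr_t size)
    {
        return (uintptr_t)1 << (log2_floor(size - 1) + 1);
    }

    int
    main(void)
    {
        /* a three-page (0x3000) stack request rounds up to 0x4000 */
        printf("%#lx\n", (unsigned long)roundup_pow2_sketch(0x3000));
        return 0;
    }
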
 
+static vm_offset_t stack_alloc_internal(void);
+static void stack_free_stack(vm_offset_t);
+
 void
 stack_init(void)
 {
        simple_lock_init(&stack_lock_data, 0);
        
+       kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+       kernel_stack_size = KERNEL_STACK_SIZE;
+       kernel_stack_mask = -KERNEL_STACK_SIZE;
+       kernel_stack_depth_max = 0;
+
        if (PE_parse_boot_argn("kernel_stack_pages",
                               &kernel_stack_pages,
                               sizeof (kernel_stack_pages))) {
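
The body of this if lies outside the hunk, so only the parse is visible. As a hedged, userland-only sketch (PE_parse_boot_argn is real xnu API; everything else here is illustrative), an override of kernel_stack_pages would feed a derived byte size along these lines, which the code above then rounds through roundup_pow2() for the address mask:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SZ 0x4000UL    /* illustrative page size */

    int
    main(void)
    {
        const char *bootargs = "debug=0x144 kernel_stack_pages=6";
        unsigned long pages = 4;    /* default */

        /* stand-in for PE_parse_boot_argn("kernel_stack_pages", ...) */
        const char *arg = strstr(bootargs, "kernel_stack_pages=");
        if (arg != NULL)
            pages = strtoul(arg + strlen("kernel_stack_pages="), NULL, 0);

        printf("pages %lu, size %#lx\n", pages, pages * PAGE_SZ);
        return 0;
    }
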
@@ -125,18 +138,18 @@ stack_init(void)
  *     Allocate a stack for a thread, may
  *     block.
  */
-void
-stack_alloc(
-       thread_t        thread)
+
+static vm_offset_t 
+stack_alloc_internal(void)
 {
-       vm_offset_t             stack;
+       vm_offset_t             stack = 0;
        spl_t                   s;
-       int                     guard_flags;
-
-       assert(thread->kernel_stack == 0);
+       int                     flags = 0;
+       kern_return_t           kr = KERN_SUCCESS;
 
        s = splsched();
        stack_lock();
+       stack_allocs++;
        stack = stack_free_list;
        if (stack != 0) {
                stack_free_list = stack_next(stack);
@@ -159,13 +172,15 @@ stack_alloc(
                 * for these.
                 */
 
-               guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
-               if (kernel_memory_allocate(kernel_map, &stack,
+               flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT;
+               kr = kernel_memory_allocate(kernel_map, &stack,
                                           kernel_stack_size + (2*PAGE_SIZE),
                                           stack_addr_mask,
-                                          KMA_KOBJECT | guard_flags)
-                   != KERN_SUCCESS)
-                       panic("stack_alloc: kernel_memory_allocate");
+                                          flags,
+                                          VM_KERN_MEMORY_STACK);
+               if (kr != KERN_SUCCESS) {
+                       panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2*PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
+               }
 
                /*
                 * The stack address that comes back is the address of the lower
@@ -174,8 +189,23 @@ stack_alloc(
 
                stack += PAGE_SIZE;
        }
+       return stack;
+}
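
kernel_memory_allocate() hands back the address of the lower guard page, and stack_alloc_internal() steps one page past it before returning, so the usable stack sits between the two guards. A sketch of the resulting layout, with illustrative addresses and sizes:

    #include <stdio.h>

    #define PAGE_SZ   0x1000UL    /* illustrative */
    #define STACK_SZ  0x4000UL    /* illustrative kernel_stack_size */

    int
    main(void)
    {
        unsigned long alloc = 0x100000UL;    /* hypothetical kma result */

        unsigned long guard_lo = alloc;                /* KMA_GUARD_FIRST */
        unsigned long stack    = alloc + PAGE_SZ;      /* value returned */
        unsigned long guard_hi = stack + STACK_SZ;     /* KMA_GUARD_LAST */

        printf("lo guard %#lx, stack [%#lx..%#lx), hi guard %#lx\n",
            guard_lo, stack, guard_hi, guard_hi);
        return 0;
    }

stack_collect() later undoes the same one-page bias (stack -= PAGE_SIZE) so the guards are included in the span handed to vm_map_remove().
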
+
+void
+stack_alloc(
+       thread_t        thread)
+{
 
-       machine_stack_attach(thread, stack);
+       assert(thread->kernel_stack == 0);
+       machine_stack_attach(thread, stack_alloc_internal());
+}
+
+void
+stack_handoff(thread_t from, thread_t to)
+{
+       assert(from == current_thread());
+       machine_stack_handoff(from, to);
 }
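
The new stack_handoff() lets the running thread donate its stack directly to the next thread instead of round-tripping through the free list. The real work is machine-dependent; conceptually (a hedged sketch, not xnu's machine_stack_handoff, which also switches register state):

    #include <stdio.h>

    struct thread_sketch { unsigned long kernel_stack; };

    static void
    handoff_sketch(struct thread_sketch *from, struct thread_sketch *to)
    {
        to->kernel_stack = from->kernel_stack;    /* donate in place */
        from->kernel_stack = 0;                   /* donor gives it up */
    }

    int
    main(void)
    {
        struct thread_sketch from = { 0x4000 }, to = { 0 };
        handoff_sketch(&from, &to);
        printf("to %#lx, from %#lx\n", to.kernel_stack, from.kernel_stack);
        return 0;
    }
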
 
 /*
@@ -189,12 +219,30 @@ stack_free(
 {
     vm_offset_t                stack = machine_stack_detach(thread);
 
+#if KASAN
+       kasan_unpoison_stack(stack, kernel_stack_size);
+       kasan_unpoison_fakestack(thread);
+#endif
+
        assert(stack);
-       if (stack != thread->reserved_stack)
+       if (stack != thread->reserved_stack) {
                stack_free_stack(stack);
+       }
 }
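
The KASAN calls clear the stack's redzones before it can be reused, since the next owner gets the memory verbatim. The stack then goes onto a LIFO free list via stack_free_stack(); per the comment earlier in the file, the next pointer lives at the stack's base, which for a downward-growing stack is the last word of the region. A self-contained sketch of that list discipline (sizes and names are illustrative):

    #include <stdio.h>

    #define STACK_SZ 0x4000UL

    /* next pointer kept in the last word of the stack region,
     * leaving the low end untouched while the stack is unused */
    #define stack_next_sketch(s) \
        (*((unsigned long *)((s) + STACK_SZ) - 1))

    static unsigned long free_list;    /* head; 0 == empty */

    static void
    push_stack(unsigned long stack)
    {
        stack_next_sketch(stack) = free_list;
        free_list = stack;
    }

    static unsigned long
    pop_stack(void)
    {
        unsigned long stack = free_list;
        if (stack != 0)
            free_list = stack_next_sketch(stack);
        return stack;
    }

    int
    main(void)
    {
        static unsigned long a[STACK_SZ / sizeof(unsigned long)];
        static unsigned long b[STACK_SZ / sizeof(unsigned long)];

        push_stack((unsigned long)a);
        push_stack((unsigned long)b);
        printf("LIFO: %d\n", pop_stack() == (unsigned long)b);  /* 1 */
        return 0;
    }
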
 
 void
+stack_free_reserved(
+       thread_t        thread)
+{
+       if (thread->reserved_stack != thread->kernel_stack) {
+#if KASAN
+               kasan_unpoison_stack(thread->reserved_stack, kernel_stack_size);
+#endif
+               stack_free_stack(thread->reserved_stack);
+       }
+}
+
+static void
 stack_free_stack(
        vm_offset_t             stack)
 {
@@ -301,7 +349,9 @@ stack_collect(void)
                         * back in stack_alloc().
                         */
 
-                       stack = (vm_offset_t)vm_map_trunc_page(stack);
+                       stack = (vm_offset_t)vm_map_trunc_page(
+                               stack,
+                               VM_MAP_PAGE_MASK(kernel_map));
                        stack -= PAGE_SIZE;
                        if (vm_map_remove(
                                    kernel_map,
@@ -360,14 +410,23 @@ __unused void             *arg)
 }
 
 void
-stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
-                    vm_size_t *alloc_size, int *collectable, int *exhaustable)
+stack_fake_zone_init(int zone_index)
+{
+       stack_fake_zone_index = zone_index;
+}
+
+void
+stack_fake_zone_info(int *count, 
+                    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
+                    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
 {
        unsigned int    total, hiwat, free;
+       unsigned long long all;
        spl_t                   s;
 
        s = splsched();
        stack_lock();
+       all = stack_allocs;
        total = stack_total;
        hiwat = stack_hiwat;
        free = stack_free_count;
@@ -379,8 +438,11 @@ stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
        *max_size   = kernel_stack_size * hiwat;
        *elem_size  = kernel_stack_size;
        *alloc_size = kernel_stack_size;
+       *sum_size = all * kernel_stack_size;
+
        *collectable = 1;
        *exhaustable = 0;
+       *caller_acct = 1;
 }
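
With the new monotonic stack_allocs counter, the fake zone can report lifetime throughput alongside the current footprint. A sketch of the arithmetic with illustrative counts:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t elem_size = 0x4000;     /* kernel_stack_size */
        uint64_t hiwat     = 150;        /* stack_hiwat snapshot */
        uint64_t allocs    = 1000000;    /* stack_allocs snapshot */

        printf("max_size %#llx\n",
            (unsigned long long)(elem_size * hiwat));
        /* sum_size counts every allocation ever made, not just
         * the stacks currently live */
        printf("sum_size %#llx\n",
            (unsigned long long)(allocs * elem_size));
        return 0;
    }
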
 
 /* OBSOLETE */
@@ -413,8 +475,8 @@ processor_set_stack_usage(
        vm_size_t maxusage;
        vm_offset_t maxstack;
 
-       register thread_t *thread_list;
-       register thread_t thread;
+       thread_t *thread_list;
+       thread_t thread;
 
        unsigned int actual;    /* this many things */
        unsigned int i;
@@ -454,9 +516,9 @@ processor_set_stack_usage(
 
        /* OK, have memory and list is locked */
        thread_list = (thread_t *) addr;
-       for (i = 0, thread = (thread_t) queue_first(&threads);
+       for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
                                        !queue_end(&threads, (queue_entry_t) thread);
-                                       thread = (thread_t) queue_next(&thread->threads)) {
+                                       thread = (thread_t)(void *) queue_next(&thread->threads)) {
                thread_reference_internal(thread);
                thread_list[i++] = thread;
        }
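
The added (void *) in the queue casts makes the pointer-type change explicit when converting a queue element to thread_t, sidestepping cast-alignment diagnostics. A sketch of the pattern with a stand-in type (the field layout here is an assumption for illustration, not xnu's struct thread):

    #include <stdio.h>

    struct queue_entry_sketch {
        struct queue_entry_sketch *next, *prev;
    };

    /* stand-in thread with its list chain as the first member */
    struct thread_sketch {
        struct queue_entry_sketch threads;
        int id;
    };

    int
    main(void)
    {
        struct thread_sketch t = { { 0, 0 }, 42 };
        struct queue_entry_sketch *e = &t.threads;

        /* casting through (void *), as in the diff, avoids
         * -Wcast-align style warnings on the direct conversion */
        struct thread_sketch *th = (struct thread_sketch *)(void *)e;
        printf("%d\n", th->id);
        return 0;
    }
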