apple/xnu.git blobdiff: osfmk/kern/stack.c (xnu-2782.20.48)

diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c
index 68cdcabc0fd54b111f3e4e5defec347eb069a776..ef9f6b8dad81842118e1d64b5f98810a800f5f7f 100644
--- a/osfmk/kern/stack.c
+++ b/osfmk/kern/stack.c
@@ -39,6 +39,7 @@
 #include <kern/thread.h>
 #include <kern/zalloc.h>
 #include <kern/kalloc.h>
+#include <kern/ledger.h>
 
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
@@ -61,7 +62,11 @@ decl_simple_lock_data(static,stack_lock_data)
 static vm_offset_t             stack_free_list;
 
 static unsigned int            stack_free_count, stack_free_hiwat;             /* free list count */
-static unsigned int            stack_total, stack_hiwat;                               /* current total count */
+static unsigned int            stack_hiwat;
+unsigned int                   stack_total;                            /* current total count */
+unsigned long long             stack_allocs;                           /* total count of allocations */
+
+static int                     stack_fake_zone_index = -1;     /* index in zone_info array */
 
 static unsigned int            stack_free_target;
 static int                             stack_free_delta;
@@ -70,22 +75,108 @@ static unsigned int                stack_new_count;                                                /* total new stack allocations */
 
 static vm_offset_t             stack_addr_mask;
 
+unsigned int                   kernel_stack_pages;
+vm_offset_t                    kernel_stack_size;
+vm_offset_t                    kernel_stack_mask;
+vm_offset_t                    kernel_stack_depth_max;
+
+static inline void
+STACK_ZINFO_PALLOC(thread_t thread)
+{
+       task_t task;
+       zinfo_usage_t zinfo;
+
+       ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+       if (stack_fake_zone_index != -1 &&
+           (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+               OSAddAtomic64(kernel_stack_size,
+                             (int64_t *)&zinfo[stack_fake_zone_index].alloc);
+}
+
+static inline void
+STACK_ZINFO_PFREE(thread_t thread)
+{
+       task_t task;
+       zinfo_usage_t zinfo;
+
+       ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+       if (stack_fake_zone_index != -1 &&
+           (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+               OSAddAtomic64(kernel_stack_size, 
+                             (int64_t *)&zinfo[stack_fake_zone_index].free);
+}
+
+static inline void
+STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
+{
+       ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+       ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+       if (stack_fake_zone_index != -1) {
+               task_t task;
+               zinfo_usage_t zinfo;
+       
+               if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+                       OSAddAtomic64(kernel_stack_size, 
+                                     (int64_t *)&zinfo[stack_fake_zone_index].free);
+
+               if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+                       OSAddAtomic64(kernel_stack_size, 
+                                     (int64_t *)&zinfo[stack_fake_zone_index].alloc);
+       }
+}
+
 /*
  *     The next field is at the base of the stack,
  *     so the low end is left unsullied.
  */
 #define stack_next(stack)      \
-                       (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+       (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
+
+static inline int
+log2(vm_offset_t size)
+{
+       int     result;
+       for (result = 0; size > 0; result++)
+               size >>= 1;
+       return result;
+}
+
+static inline vm_offset_t
+roundup_pow2(vm_offset_t size)
+{
+       return 1UL << (log2(size - 1) + 1); 
+}
+
+static vm_offset_t stack_alloc_internal(void);
+static void stack_free_stack(vm_offset_t);
 
 void
 stack_init(void)
 {
        simple_lock_init(&stack_lock_data, 0);
        
-       if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
-               panic("stack_init: stack size %d not a multiple of page size %d\n",     KERNEL_STACK_SIZE, PAGE_SIZE);
+       kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+       kernel_stack_size = KERNEL_STACK_SIZE;
+       kernel_stack_mask = -KERNEL_STACK_SIZE;
+       kernel_stack_depth_max = 0;
+
+       if (PE_parse_boot_argn("kernel_stack_pages",
+                              &kernel_stack_pages,
+                              sizeof (kernel_stack_pages))) {
+               kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
+               printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
+                       kernel_stack_pages, (void *) kernel_stack_size);
+       }
+
+       if (kernel_stack_size < round_page(kernel_stack_size))
+               panic("stack_init: stack size %p not a multiple of page size %d\n",
+                       (void *) kernel_stack_size, PAGE_SIZE);
        
-       stack_addr_mask = KERNEL_STACK_SIZE - 1;
+       stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
+       kernel_stack_mask = ~stack_addr_mask;
 }
 
 /*
@@ -94,18 +185,17 @@ stack_init(void)
  *     Allocate a stack for a thread, may
  *     block.
  */
-void
-stack_alloc(
-       thread_t        thread)
+
+static vm_offset_t 
+stack_alloc_internal(void)
 {
        vm_offset_t             stack;
        spl_t                   s;
        int                     guard_flags;
 
-       assert(thread->kernel_stack == 0);
-
        s = splsched();
        stack_lock();
+       stack_allocs++;
        stack = stack_free_list;
        if (stack != 0) {
                stack_free_list = stack_next(stack);
@@ -130,9 +220,9 @@ stack_alloc(
 
                guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
                if (kernel_memory_allocate(kernel_map, &stack,
-                                          KERNEL_STACK_SIZE + (2*PAGE_SIZE),
+                                          kernel_stack_size + (2*PAGE_SIZE),
                                           stack_addr_mask,
-                                          KMA_KOBJECT | guard_flags)
+                                          KMA_KSTACK | KMA_KOBJECT | guard_flags)
                    != KERN_SUCCESS)
                        panic("stack_alloc: kernel_memory_allocate");
 
@@ -143,8 +233,25 @@ stack_alloc(
 
                stack += PAGE_SIZE;
        }
+       return stack;
+}
+
+void
+stack_alloc(
+       thread_t        thread)
+{
+
+       assert(thread->kernel_stack == 0);
+       machine_stack_attach(thread, stack_alloc_internal());
+       STACK_ZINFO_PALLOC(thread);
+}
 
-       machine_stack_attach(thread, stack);
+void
+stack_handoff(thread_t from, thread_t to)
+{
+       assert(from == current_thread());
+       machine_stack_handoff(from, to);
+       STACK_ZINFO_HANDOFF(from, to);
 }
 
 /*
@@ -159,11 +266,23 @@ stack_free(
     vm_offset_t                stack = machine_stack_detach(thread);
 
        assert(stack);
-       if (stack != thread->reserved_stack)
+       if (stack != thread->reserved_stack) {
+               STACK_ZINFO_PFREE(thread);
                stack_free_stack(stack);
+       }
 }
 
 void
+stack_free_reserved(
+       thread_t        thread)
+{
+       if (thread->reserved_stack != thread->kernel_stack) {
+               stack_free_stack(thread->reserved_stack);
+               STACK_ZINFO_PFREE(thread);
+       }
+}
+
+static void
 stack_free_stack(
        vm_offset_t             stack)
 {
@@ -209,6 +328,7 @@ stack_alloc_try(
        cache = &PROCESSOR_DATA(current_processor(), stack_cache);
        stack = cache->free;
        if (stack != 0) {
+               STACK_ZINFO_PALLOC(thread);
                cache->free = stack_next(stack);
                cache->count--;
        }
@@ -217,6 +337,7 @@ stack_alloc_try(
                        stack_lock();
                        stack = stack_free_list;
                        if (stack != 0) {
+                               STACK_ZINFO_PALLOC(thread);
                                stack_free_list = stack_next(stack);
                                stack_free_count--;
                                stack_free_delta--;
@@ -270,12 +391,14 @@ stack_collect(void)
                         * back in stack_alloc().
                         */
 
-                       stack = vm_map_trunc_page(stack);
+                       stack = (vm_offset_t)vm_map_trunc_page(
+                               stack,
+                               VM_MAP_PAGE_MASK(kernel_map));
                        stack -= PAGE_SIZE;
                        if (vm_map_remove(
                                    kernel_map,
                                    stack,
-                                   stack + KERNEL_STACK_SIZE+(2*PAGE_SIZE),
+                                   stack + kernel_stack_size+(2*PAGE_SIZE),
                                    VM_MAP_REMOVE_KUNWIRE)
                            != KERN_SUCCESS)
                                panic("stack_collect: vm_map_remove");
@@ -329,14 +452,23 @@ __unused void             *arg)
 }
 
 void
-stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
-                    vm_size_t *alloc_size, int *collectable, int *exhaustable)
+stack_fake_zone_init(int zone_index)
+{
+       stack_fake_zone_index = zone_index;
+}
+
+void
+stack_fake_zone_info(int *count, 
+                    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
+                    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
 {
        unsigned int    total, hiwat, free;
+       unsigned long long all;
        spl_t                   s;
 
        s = splsched();
        stack_lock();
+       all = stack_allocs;
        total = stack_total;
        hiwat = stack_hiwat;
        free = stack_free_count;
@@ -344,12 +476,15 @@ stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_si
        splx(s);
 
        *count      = total - free;
-       *cur_size   = KERNEL_STACK_SIZE * total;
-       *max_size   = KERNEL_STACK_SIZE * hiwat;
-       *elem_size  = KERNEL_STACK_SIZE;
-       *alloc_size = KERNEL_STACK_SIZE;
+       *cur_size   = kernel_stack_size * total;
+       *max_size   = kernel_stack_size * hiwat;
+       *elem_size  = kernel_stack_size;
+       *alloc_size = kernel_stack_size;
+       *sum_size = all * kernel_stack_size;
+
        *collectable = 1;
        *exhaustable = 0;
+       *caller_acct = 1;
 }
 
 /* OBSOLETE */
@@ -398,7 +533,7 @@ processor_set_stack_usage(
        addr = NULL;
 
        for (;;) {
-               mutex_lock(&tasks_threads_lock);
+               lck_mtx_lock(&tasks_threads_lock);
 
                actual = threads_count;
 
@@ -408,7 +543,7 @@ processor_set_stack_usage(
                if (size_needed <= size)
                        break;
 
-               mutex_unlock(&tasks_threads_lock);
+               lck_mtx_unlock(&tasks_threads_lock);
 
                if (size != 0)
                        kfree(addr, size);
@@ -423,15 +558,15 @@ processor_set_stack_usage(
 
        /* OK, have memory and list is locked */
        thread_list = (thread_t *) addr;
-       for (i = 0, thread = (thread_t) queue_first(&threads);
+       for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
                                        !queue_end(&threads, (queue_entry_t) thread);
-                                       thread = (thread_t) queue_next(&thread->threads)) {
+                                       thread = (thread_t)(void *) queue_next(&thread->threads)) {
                thread_reference_internal(thread);
                thread_list[i++] = thread;
        }
        assert(i <= actual);
 
-       mutex_unlock(&tasks_threads_lock);
+       lck_mtx_unlock(&tasks_threads_lock);
 
        /* calculate maxusage and free thread references */
 
@@ -451,7 +586,7 @@ processor_set_stack_usage(
                kfree(addr, size);
 
        *totalp = total;
-       *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
+       *residentp = *spacep = total * round_page(kernel_stack_size);
        *maxusagep = maxusage;
        *maxstackp = maxstack;
        return KERN_SUCCESS;
@@ -461,10 +596,10 @@ processor_set_stack_usage(
 
 vm_offset_t min_valid_stack_address(void)
 {
-       return vm_map_min(kernel_map);
+       return (vm_offset_t)vm_map_min(kernel_map);
 }
 
 vm_offset_t max_valid_stack_address(void)
 {
-       return vm_map_max(kernel_map);
+       return (vm_offset_t)vm_map_max(kernel_map);
 }