diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c
index 9906b8b3a9a0364dd4d6574f89d864fe8b15b44f..0cb79328666e504b9883579a07d7177a95f9b34a 100644
--- a/osfmk/kern/stack.c
+++ b/osfmk/kern/stack.c
@@ -75,37 +75,21 @@ static unsigned int         stack_new_count;                                                /* total new stack allocations */
 
 static vm_offset_t             stack_addr_mask;
 
-unsigned int                   kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
-vm_offset_t                    kernel_stack_size = KERNEL_STACK_SIZE;
-vm_offset_t                    kernel_stack_mask = -KERNEL_STACK_SIZE;
-vm_offset_t                    kernel_stack_depth_max = 0;
+unsigned int                   kernel_stack_pages;
+vm_offset_t                    kernel_stack_size;
+vm_offset_t                    kernel_stack_mask;
+vm_offset_t                    kernel_stack_depth_max;
 
 static inline void
 STACK_ZINFO_PALLOC(thread_t thread)
 {
-       task_t task;
-       zinfo_usage_t zinfo;
-
        ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-       if (stack_fake_zone_index != -1 &&
-           (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-               OSAddAtomic64(kernel_stack_size,
-                             (int64_t *)&zinfo[stack_fake_zone_index].alloc);
 }
 
 static inline void
 STACK_ZINFO_PFREE(thread_t thread)
 {
-       task_t task;
-       zinfo_usage_t zinfo;
-
        ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-       if (stack_fake_zone_index != -1 &&
-           (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-               OSAddAtomic64(kernel_stack_size, 
-                             (int64_t *)&zinfo[stack_fake_zone_index].free);
 }
 
 static inline void
@@ -113,19 +97,6 @@ STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
 {
        ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
        ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-       if (stack_fake_zone_index != -1) {
-               task_t task;
-               zinfo_usage_t zinfo;
-       
-               if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-                       OSAddAtomic64(kernel_stack_size, 
-                                     (int64_t *)&zinfo[stack_fake_zone_index].free);
-
-               if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-                       OSAddAtomic64(kernel_stack_size, 
-                                     (int64_t *)&zinfo[stack_fake_zone_index].alloc);
-       }
 }
 
 /*
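
Note: with the fake-zone counters gone, kernel stack memory is attributed to a task solely through the ledger credit/debit pair shown above. A minimal sketch of reading that attribution back, assuming the ledger_get_balance() accessor from osfmk/kern/ledger.h (the helper name itself is hypothetical):

        /* Sketch: read back the balance maintained by the credit/debit
         * pair above.  Assumes ledger_get_balance() as declared in
         * osfmk/kern/ledger.h; stack_ledger_balance() is illustrative. */
        static ledger_amount_t
        stack_ledger_balance(thread_t thread)
        {
                ledger_amount_t balance = 0;

                ledger_get_balance(thread->t_ledger,
                                   task_ledgers.tkm_private, &balance);
                return balance;
        }
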
@@ -158,6 +129,11 @@ stack_init(void)
 {
        simple_lock_init(&stack_lock_data, 0);
        
+       kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+       kernel_stack_size = KERNEL_STACK_SIZE;
+       kernel_stack_mask = -KERNEL_STACK_SIZE;
+       kernel_stack_depth_max = 0;
+
        if (PE_parse_boot_argn("kernel_stack_pages",
                               &kernel_stack_pages,
                               sizeof (kernel_stack_pages))) {
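
Note: the defaults now live in stack_init() instead of static initializers, so they are in place before the boot-arg check that follows. PE_parse_boot_argn() copies the named boot argument into the caller's buffer and returns TRUE only when it was actually supplied. A sketch of the override pattern; the body of the if-block is an illustrative assumption, not a quote of the lines truncated above:

        /* Sketch of a boot-arg override.  PE_parse_boot_argn() is the
         * real pexpert interface; the recomputation inside the block is
         * an assumed follow-up, not the elided source. */
        kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;     /* default */

        if (PE_parse_boot_argn("kernel_stack_pages",
                               &kernel_stack_pages,
                               sizeof (kernel_stack_pages))) {
                /* assumption: derive the byte size from the override */
                kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
        }
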
@@ -217,7 +193,8 @@ stack_alloc_internal(void)
                if (kernel_memory_allocate(kernel_map, &stack,
                                           kernel_stack_size + (2*PAGE_SIZE),
                                           stack_addr_mask,
-                                          KMA_KSTACK | KMA_KOBJECT | guard_flags)
+                                          KMA_KSTACK | KMA_KOBJECT | guard_flags,
+                                          VM_KERN_MEMORY_STACK)
                    != KERN_SUCCESS)
                        panic("stack_alloc: kernel_memory_allocate");
 
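
Note: the new trailing argument is a VM allocation tag. VM_KERN_MEMORY_STACK classifies these wired pages under the kernel-stack bucket of the kernel's per-tag memory accounting instead of leaving them anonymous. A hedged sketch of a tagged allocation, mirroring the call above but with illustrative size and mask values:

        /* Sketch: a tagged wired allocation.  The final parameter is the
         * vm_tag_t used for accounting; the size and mask here are
         * illustrative, not the values used for kernel stacks. */
        vm_offset_t addr = 0;

        if (kernel_memory_allocate(kernel_map, &addr,
                                   PAGE_SIZE, 0,
                                   KMA_KOBJECT,
                                   VM_KERN_MEMORY_STACK) != KERN_SUCCESS)
                panic("example: kernel_memory_allocate");
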
@@ -386,7 +363,9 @@ stack_collect(void)
                         * back in stack_alloc().
                         */
 
-                       stack = (vm_offset_t)vm_map_trunc_page(stack);
+                       stack = (vm_offset_t)vm_map_trunc_page(
+                               stack,
+                               VM_MAP_PAGE_MASK(kernel_map));
                        stack -= PAGE_SIZE;
                        if (vm_map_remove(
                                    kernel_map,
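
Note: vm_map_trunc_page() now takes the page mask explicitly, and VM_MAP_PAGE_MASK(kernel_map) supplies the kernel map's own mask, so the rounding stays correct even if that map's page size differs from the global PAGE_SIZE. The operation itself is just a mask of the low bits:

        /* Sketch: truncating an address to a page boundary with an
         * explicit mask, assuming the usual (addr & ~mask) definition.
         * Values are illustrative. */
        vm_offset_t addr = 0x12345678;
        vm_offset_t mask = PAGE_SIZE - 1;       /* e.g. 0xFFF for 4 KiB pages */
        vm_offset_t base = addr & ~mask;        /* 0x12345000 */
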
@@ -510,8 +489,8 @@ processor_set_stack_usage(
        vm_size_t maxusage;
        vm_offset_t maxstack;
 
-       register thread_t *thread_list;
-       register thread_t thread;
+       thread_t *thread_list;
+       thread_t thread;
 
        unsigned int actual;    /* this many things */
        unsigned int i;
@@ -551,9 +530,9 @@ processor_set_stack_usage(
 
        /* OK, have memory and list is locked */
        thread_list = (thread_t *) addr;
-       for (i = 0, thread = (thread_t) queue_first(&threads);
+       for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
                                        !queue_end(&threads, (queue_entry_t) thread);
-                                       thread = (thread_t) queue_next(&thread->threads)) {
+                                       thread = (thread_t)(void *) queue_next(&thread->threads)) {
                thread_reference_internal(thread);
                thread_list[i++] = thread;
        }
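
Note: the intermediate (void *) silences the stricter pointer-cast diagnostics (e.g. -Wcast-align) that a direct queue_entry_t to thread_t cast now triggers; the generated code is unchanged. The same idiom in isolation, with an illustrative structure in place of struct thread:

        /* Sketch: recovering a containing structure from its queue
         * linkage.  The (void *) step keeps -Wcast-align quiet; struct
         * widget and widget_queue are illustrative, not from stack.c. */
        struct widget {
                queue_chain_t   link;   /* at offset 0, so the cast is valid */
                int             value;
        };

        queue_head_t    widget_queue;

        struct widget *w = (struct widget *)(void *) queue_first(&widget_queue);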