X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/316670eb35587141e969394ae8537d66b9211e80..d26ffc64f583ab2d29df48f13518685602bc8832:/osfmk/kern/stack.c

diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c
index 9906b8b3a..18db3f24b 100644
--- a/osfmk/kern/stack.c
+++ b/osfmk/kern/stack.c
@@ -45,6 +45,7 @@
 #include 
 
 #include 
+#include 
 
 /*
  * We allocate stacks from generic kernel VM.
@@ -75,58 +76,10 @@ static unsigned int stack_new_count; /* total new stack allocations */
 
 static vm_offset_t stack_addr_mask;
 
-unsigned int kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
-vm_offset_t kernel_stack_size = KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_mask = -KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_depth_max = 0;
-
-static inline void
-STACK_ZINFO_PALLOC(thread_t thread)
-{
-	task_t task;
-	zinfo_usage_t zinfo;
-
-	ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-	if (stack_fake_zone_index != -1 &&
-	    (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-		OSAddAtomic64(kernel_stack_size,
-			      (int64_t *)&zinfo[stack_fake_zone_index].alloc);
-}
-
-static inline void
-STACK_ZINFO_PFREE(thread_t thread)
-{
-	task_t task;
-	zinfo_usage_t zinfo;
-
-	ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-	if (stack_fake_zone_index != -1 &&
-	    (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-		OSAddAtomic64(kernel_stack_size,
-			      (int64_t *)&zinfo[stack_fake_zone_index].free);
-}
-
-static inline void
-STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
-{
-	ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-	ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
-	if (stack_fake_zone_index != -1) {
-		task_t task;
-		zinfo_usage_t zinfo;
-
-		if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-			OSAddAtomic64(kernel_stack_size,
-				      (int64_t *)&zinfo[stack_fake_zone_index].free);
-
-		if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
-			OSAddAtomic64(kernel_stack_size,
-				      (int64_t *)&zinfo[stack_fake_zone_index].alloc);
-	}
-}
+unsigned int kernel_stack_pages;
+vm_offset_t kernel_stack_size;
+vm_offset_t kernel_stack_mask;
+vm_offset_t kernel_stack_depth_max;
 
 /*
  * The next field is at the base of the stack,
@@ -158,6 +111,11 @@ stack_init(void)
 {
 	simple_lock_init(&stack_lock_data, 0);
 
+	kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+	kernel_stack_size = KERNEL_STACK_SIZE;
+	kernel_stack_mask = -KERNEL_STACK_SIZE;
+	kernel_stack_depth_max = 0;
+
 	if (PE_parse_boot_argn("kernel_stack_pages",
 			       &kernel_stack_pages,
 			       sizeof (kernel_stack_pages))) {
@@ -184,9 +142,10 @@ stack_init(void)
 static vm_offset_t
 stack_alloc_internal(void)
 {
-	vm_offset_t stack;
+	vm_offset_t stack = 0;
 	spl_t s;
-	int guard_flags;
+	int flags = 0;
+	kern_return_t kr = KERN_SUCCESS;
 
 	s = splsched();
 	stack_lock();
@@ -213,13 +172,15 @@ stack_alloc_internal(void)
 		 * for these.
 		 */
 
-		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
-		if (kernel_memory_allocate(kernel_map, &stack,
+		flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT;
+		kr = kernel_memory_allocate(kernel_map, &stack,
 					   kernel_stack_size + (2*PAGE_SIZE),
 					   stack_addr_mask,
-					   KMA_KSTACK | KMA_KOBJECT | guard_flags)
-		    != KERN_SUCCESS)
-			panic("stack_alloc: kernel_memory_allocate");
+					   flags,
+					   VM_KERN_MEMORY_STACK);
+		if (kr != KERN_SUCCESS) {
+			panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2*PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr);
+		}
 
 		/*
 		 * The stack address that comes back is the address of the lower
@@ -238,7 +199,6 @@ stack_alloc(
 
 	assert(thread->kernel_stack == 0);
 	machine_stack_attach(thread, stack_alloc_internal());
-	STACK_ZINFO_PALLOC(thread);
 }
 
 void
@@ -246,7 +206,6 @@ stack_handoff(thread_t from, thread_t to)
 {
 	assert(from == current_thread());
 	machine_stack_handoff(from, to);
-	STACK_ZINFO_HANDOFF(from, to);
 }
 
 /*
@@ -260,9 +219,13 @@ stack_free(
 {
 	vm_offset_t stack = machine_stack_detach(thread);
 
+#if KASAN
+	kasan_unpoison_stack(stack, kernel_stack_size);
+	kasan_unpoison_fakestack(thread);
+#endif
+
 	assert(stack);
 	if (stack != thread->reserved_stack) {
-		STACK_ZINFO_PFREE(thread);
 		stack_free_stack(stack);
 	}
 }
@@ -272,8 +235,10 @@ stack_free_reserved(
 	thread_t thread)
 {
 	if (thread->reserved_stack != thread->kernel_stack) {
+#if KASAN
+		kasan_unpoison_stack(thread->reserved_stack, kernel_stack_size);
+#endif
 		stack_free_stack(thread->reserved_stack);
-		STACK_ZINFO_PFREE(thread);
 	}
 }
 
@@ -323,7 +288,6 @@ stack_alloc_try(
 	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
 	stack = cache->free;
 	if (stack != 0) {
-		STACK_ZINFO_PALLOC(thread);
 		cache->free = stack_next(stack);
 		cache->count--;
 	}
@@ -332,7 +296,6 @@ stack_alloc_try(
 		stack_lock();
 		stack = stack_free_list;
 		if (stack != 0) {
-			STACK_ZINFO_PALLOC(thread);
 			stack_free_list = stack_next(stack);
 			stack_free_count--;
 			stack_free_delta--;
@@ -386,7 +349,9 @@ stack_collect(void)
 			 * back in stack_alloc().
 			 */
 
-			stack = (vm_offset_t)vm_map_trunc_page(stack);
+			stack = (vm_offset_t)vm_map_trunc_page(
+				stack,
+				VM_MAP_PAGE_MASK(kernel_map));
 			stack -= PAGE_SIZE;
 			if (vm_map_remove(
 				    kernel_map,
@@ -510,8 +475,8 @@ processor_set_stack_usage(
 	vm_size_t maxusage;
 	vm_offset_t maxstack;
 
-	register thread_t *thread_list;
-	register thread_t thread;
+	thread_t *thread_list;
+	thread_t thread;
 
 	unsigned int actual;	/* this many things */
 	unsigned int i;
@@ -551,9 +516,9 @@ processor_set_stack_usage(
 
 	/* OK, have memory and list is locked */
 	thread_list = (thread_t *) addr;
-	for (i = 0, thread = (thread_t) queue_first(&threads);
+	for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
 	    !queue_end(&threads, (queue_entry_t) thread);
-	    thread = (thread_t) queue_next(&thread->threads)) {
+	    thread = (thread_t)(void *) queue_next(&thread->threads)) {
 		thread_reference_internal(thread);
 		thread_list[i++] = thread;
 	}
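
A note on the stack_alloc_internal() hunk above: the allocation asks kernel_memory_allocate() for kernel_stack_size plus two extra pages and passes KMA_GUARD_FIRST | KMA_GUARD_LAST, so the usable stack ends up bracketed by two inaccessible guard pages, and the address returned is that of the lower guard page (the caller then steps one PAGE_SIZE past it to reach the real stack base). The sketch below reproduces that layout in user space with mmap()/mprotect(), purely to illustrate the arithmetic; it is not the xnu code path, and stack_alloc_demo/stack_free_demo are names invented for this example.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Illustrative only: reserve a stack-sized region with one extra page on
 * each side, then revoke all access to those edge pages so that running
 * off either end of the "stack" faults, mirroring KMA_GUARD_FIRST/LAST.
 */
static void *
stack_alloc_demo(size_t stack_size, size_t page_size)
{
	size_t total = stack_size + 2 * page_size;
	char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* Lower and upper guard pages: no access at all. */
	mprotect(base, page_size, PROT_NONE);
	mprotect(base + page_size + stack_size, page_size, PROT_NONE);

	/*
	 * As in stack_alloc_internal(), the address handed back is just
	 * past the lower guard page, i.e. the usable stack base.
	 */
	return base + page_size;
}

static void
stack_free_demo(void *stack, size_t stack_size, size_t page_size)
{
	/* Step back over the lower guard page before unmapping the whole region. */
	munmap((char *)stack - page_size, stack_size + 2 * page_size);
}

int
main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t stack_size = 4 * page_size;	/* stand-in for kernel_stack_size */

	char *stack = stack_alloc_demo(stack_size, page_size);
	if (stack == NULL) {
		perror("mmap");
		return 1;
	}
	stack[0] = 1;		/* inside the usable range: fine */
	/* stack[-1] = 1; */	/* would touch the lower guard page and fault */
	stack_free_demo(stack, stack_size, page_size);
	return 0;
}

The remaining hunks are independent of this layout: the per-task ledger/zinfo accounting (the STACK_ZINFO_* helpers) is removed, allocations are tagged with VM_KERN_MEMORY_STACK, the kernel_stack_* globals are now initialized at boot in stack_init() instead of via static initializers, and KASAN builds unpoison a stack's shadow before its pages go back on the free list.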