#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
+#include <kern/ledger.h>
+#include <libkern/OSAtomic.h>	/* OSAddAtomic64 */
+#include <pexpert/pexpert.h>	/* PE_parse_boot_argn */
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
static vm_offset_t stack_free_list;
static unsigned int stack_free_count, stack_free_hiwat; /* free list count */
-static unsigned int stack_total, stack_hiwat; /* current total count */
+static unsigned int stack_hiwat;
+unsigned int stack_total; /* current total count */
+unsigned long long stack_allocs; /* total count of allocations */
+
+static int stack_fake_zone_index = -1; /* index in zone_info array */
static unsigned int stack_free_target;
static int stack_free_delta;
static vm_offset_t stack_addr_mask;
+unsigned int kernel_stack_pages;
+vm_offset_t kernel_stack_size;
+vm_offset_t kernel_stack_mask;
+vm_offset_t kernel_stack_depth_max;
+
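+/*
+ * Stack accounting helpers: credit or debit kernel_stack_size against the
+ * owning thread's ledger and, once the stack fake zone has been registered
+ * (stack_fake_zone_index != -1), against the task's per-zone usage counters.
+ */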
+static inline void
+STACK_ZINFO_PALLOC(thread_t thread)
+{
+ task_t task;
+ zinfo_usage_t zinfo;
+
+ ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+ if (stack_fake_zone_index != -1 &&
+ (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(kernel_stack_size,
+ (int64_t *)&zinfo[stack_fake_zone_index].alloc);
+}
+
+static inline void
+STACK_ZINFO_PFREE(thread_t thread)
+{
+ task_t task;
+ zinfo_usage_t zinfo;
+
+ ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+ if (stack_fake_zone_index != -1 &&
+ (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(kernel_stack_size,
+ (int64_t *)&zinfo[stack_fake_zone_index].free);
+}
+
+static inline void
+STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
+{
+ ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+ ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
+
+ if (stack_fake_zone_index != -1) {
+ task_t task;
+ zinfo_usage_t zinfo;
+
+ if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(kernel_stack_size,
+ (int64_t *)&zinfo[stack_fake_zone_index].free);
+
+ if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(kernel_stack_size,
+ (int64_t *)&zinfo[stack_fake_zone_index].alloc);
+ }
+}
+
/*
* The next field is at the base of the stack,
* so the low end is left unsullied.
*/
#define stack_next(stack) \
- (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
+ (*((vm_offset_t *)((stack) + kernel_stack_size) - 1))
+
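+/*
+ * Note: log2() as written returns the bit width of its argument (one more
+ * than the integer log2), so roundup_pow2(size) comes back as a power of
+ * two twice as large as a conventional round-up would give.
+ */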
+static inline int
+log2(vm_offset_t size)
+{
+ int result;
+ for (result = 0; size > 0; result++)
+ size >>= 1;
+ return result;
+}
+
+static inline vm_offset_t
+roundup_pow2(vm_offset_t size)
+{
+ return 1UL << (log2(size - 1) + 1);
+}
+
+static vm_offset_t stack_alloc_internal(void);
+static void stack_free_stack(vm_offset_t);
void
stack_init(void)
{
simple_lock_init(&stack_lock_data, 0);
- if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
- panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);
+ kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+ kernel_stack_size = KERNEL_STACK_SIZE;
+ kernel_stack_mask = -KERNEL_STACK_SIZE;
+ kernel_stack_depth_max = 0;
+
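+ /*
+  * The stack size may be overridden, in pages, with the
+  * "kernel_stack_pages" boot argument.
+  */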
+ if (PE_parse_boot_argn("kernel_stack_pages",
+ &kernel_stack_pages,
+ sizeof (kernel_stack_pages))) {
+ kernel_stack_size = kernel_stack_pages * PAGE_SIZE;
+ printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n",
+ kernel_stack_pages, (void *) kernel_stack_size);
+ }
+
+ if (kernel_stack_size < round_page(kernel_stack_size))
+ panic("stack_init: stack size %p not a multiple of page size %d\n",
+ (void *) kernel_stack_size, PAGE_SIZE);
- stack_addr_mask = KERNEL_STACK_SIZE - 1;
+ stack_addr_mask = roundup_pow2(kernel_stack_size) - 1;
+ kernel_stack_mask = ~stack_addr_mask;
}
/*
 * Allocate a stack for a thread; may block.
*/
-void
-stack_alloc(
- thread_t thread)
+
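+/*
+ * Allocate one kernel stack: reuse an entry from the free list when one is
+ * available, otherwise map a fresh stack with a guard page at each end,
+ * and return the base of the usable region.
+ */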
+static vm_offset_t
+stack_alloc_internal(void)
{
vm_offset_t stack;
spl_t s;
int guard_flags;
- assert(thread->kernel_stack == 0);
-
s = splsched();
stack_lock();
+ stack_allocs++;
stack = stack_free_list;
if (stack != 0) {
stack_free_list = stack_next(stack);
guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
if (kernel_memory_allocate(kernel_map, &stack,
- KERNEL_STACK_SIZE + (2*PAGE_SIZE),
+ kernel_stack_size + (2*PAGE_SIZE),
stack_addr_mask,
- KMA_KOBJECT | guard_flags)
+ KMA_KSTACK | KMA_KOBJECT | guard_flags)
!= KERN_SUCCESS)
panic("stack_alloc: kernel_memory_allocate");
stack += PAGE_SIZE;
}
+ return stack;
+}
+
+void
+stack_alloc(
+ thread_t thread)
+{
+
+ assert(thread->kernel_stack == 0);
+ machine_stack_attach(thread, stack_alloc_internal());
+ STACK_ZINFO_PALLOC(thread);
+}
- machine_stack_attach(thread, stack);
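+/*
+ * Hand the current thread's kernel stack directly to another thread,
+ * transferring the ledger and fake-zone accounting along with it.
+ */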
+void
+stack_handoff(thread_t from, thread_t to)
+{
+ assert(from == current_thread());
+ machine_stack_handoff(from, to);
+ STACK_ZINFO_HANDOFF(from, to);
}
/*
vm_offset_t stack = machine_stack_detach(thread);
assert(stack);
- if (stack != thread->reserved_stack)
+ if (stack != thread->reserved_stack) {
+ STACK_ZINFO_PFREE(thread);
stack_free_stack(stack);
+ }
}
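+/*
+ * Release a thread's reserved stack, unless it is currently in use as
+ * the thread's kernel stack.
+ */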
void
+stack_free_reserved(
+ thread_t thread)
+{
+ if (thread->reserved_stack != thread->kernel_stack) {
+ stack_free_stack(thread->reserved_stack);
+ STACK_ZINFO_PFREE(thread);
+ }
+}
+
+static void
stack_free_stack(
vm_offset_t stack)
{
cache = &PROCESSOR_DATA(current_processor(), stack_cache);
stack = cache->free;
if (stack != 0) {
+ STACK_ZINFO_PALLOC(thread);
cache->free = stack_next(stack);
cache->count--;
}
stack_lock();
stack = stack_free_list;
if (stack != 0) {
+ STACK_ZINFO_PALLOC(thread);
stack_free_list = stack_next(stack);
stack_free_count--;
stack_free_delta--;
* back in stack_alloc().
*/
- stack = vm_map_trunc_page(stack);
+ stack = (vm_offset_t)vm_map_trunc_page(
+ stack,
+ VM_MAP_PAGE_MASK(kernel_map));
stack -= PAGE_SIZE;
if (vm_map_remove(
kernel_map,
stack,
- stack + KERNEL_STACK_SIZE+(2*PAGE_SIZE),
+ stack + kernel_stack_size+(2*PAGE_SIZE),
VM_MAP_REMOVE_KUNWIRE)
!= KERN_SUCCESS)
panic("stack_collect: vm_map_remove");
}
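+/*
+ * Record the index assigned to the stack fake zone so the accounting
+ * helpers above can update the per-task zone usage counters.
+ */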
void
-stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
- vm_size_t *alloc_size, int *collectable, int *exhaustable)
+stack_fake_zone_init(int zone_index)
+{
+ stack_fake_zone_index = zone_index;
+}
+
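+/*
+ * sum_size reports the cumulative bytes ever allocated
+ * (stack_allocs * kernel_stack_size); caller_acct marks stack memory as
+ * accounted to the allocating tasks (cf. the tkm_private ledger credits).
+ */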
+void
+stack_fake_zone_info(int *count,
+ vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
+ uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
unsigned int total, hiwat, free;
+ unsigned long long all;
spl_t s;
s = splsched();
stack_lock();
+ all = stack_allocs;
total = stack_total;
hiwat = stack_hiwat;
free = stack_free_count;
splx(s);
*count = total - free;
- *cur_size = KERNEL_STACK_SIZE * total;
- *max_size = KERNEL_STACK_SIZE * hiwat;
- *elem_size = KERNEL_STACK_SIZE;
- *alloc_size = KERNEL_STACK_SIZE;
+ *cur_size = kernel_stack_size * total;
+ *max_size = kernel_stack_size * hiwat;
+ *elem_size = kernel_stack_size;
+ *alloc_size = kernel_stack_size;
+ *sum_size = all * kernel_stack_size;
+
*collectable = 1;
*exhaustable = 0;
+ *caller_acct = 1;
}
/* OBSOLETE */
addr = NULL;
for (;;) {
- mutex_lock(&tasks_threads_lock);
+ lck_mtx_lock(&tasks_threads_lock);
actual = threads_count;
if (size_needed <= size)
break;
- mutex_unlock(&tasks_threads_lock);
+ lck_mtx_unlock(&tasks_threads_lock);
if (size != 0)
kfree(addr, size);
/* OK, have memory and list is locked */
thread_list = (thread_t *) addr;
- for (i = 0, thread = (thread_t) queue_first(&threads);
+ for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
!queue_end(&threads, (queue_entry_t) thread);
- thread = (thread_t) queue_next(&thread->threads)) {
+ thread = (thread_t)(void *) queue_next(&thread->threads)) {
thread_reference_internal(thread);
thread_list[i++] = thread;
}
assert(i <= actual);
- mutex_unlock(&tasks_threads_lock);
+ lck_mtx_unlock(&tasks_threads_lock);
/* calculate maxusage and free thread references */
kfree(addr, size);
*totalp = total;
- *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
+ *residentp = *spacep = total * round_page(kernel_stack_size);
*maxusagep = maxusage;
*maxstackp = maxstack;
return KERN_SUCCESS;
vm_offset_t min_valid_stack_address(void)
{
- return vm_map_min(kernel_map);
+ return (vm_offset_t)vm_map_min(kernel_map);
}
vm_offset_t max_valid_stack_address(void)
{
- return vm_map_max(kernel_map);
+ return (vm_offset_t)vm_map_max(kernel_map);
}