/*
- * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define STACK_CACHE_SIZE 2
-static vm_map_t stack_map;
static vm_offset_t stack_free_list;
static unsigned int stack_free_count, stack_free_hiwat; /* free list count and high-water mark */
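/*
 * Each processor keeps a small private cache of up to STACK_CACHE_SIZE
 * free stacks; overflow goes onto the global stack_free_list, whose
 * length and high-water mark are tracked in stack_free_count and
 * stack_free_hiwat. Free stacks are presumably chained through
 * stack_next(), which stores the link word inside the freed stack
 * itself, so the list needs no separate bookkeeping storage.
 */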
void
stack_init(void)
{
- vm_offset_t stacks, boundary;
- vm_map_offset_t map_addr;
-
simple_lock_init(&stack_lock_data, 0);
if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
panic("stack_init: stack size %d not a multiple of page size %d\n", KERNEL_STACK_SIZE, PAGE_SIZE);
- for (boundary = PAGE_SIZE; boundary <= KERNEL_STACK_SIZE; )
- boundary <<= 1;
-
- stack_addr_mask = boundary - 1;
-
- if (kmem_suballoc(kernel_map, &stacks, (boundary * (2 * THREAD_MAX + 64)),
- FALSE, VM_FLAGS_ANYWHERE, &stack_map) != KERN_SUCCESS)
- panic("stack_init: kmem_suballoc");
-
- map_addr = vm_map_min(stack_map);
- if (vm_map_enter(stack_map, &map_addr, vm_map_round_page(PAGE_SIZE), 0, (VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_FIXED),
- VM_OBJECT_NULL, 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT) != KERN_SUCCESS)
- panic("stack_init: vm_map_enter");
+ stack_addr_mask = KERNEL_STACK_SIZE - 1;
}
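/*
 * stack_addr_mask is passed below as the alignment mask to
 * kernel_memory_allocate(), so this assumes KERNEL_STACK_SIZE is a
 * power of two (e.g. a 16 KB stack would give stack_addr_mask == 0x3fff).
 */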
/*
 * stack_alloc: allocate a kernel stack for a thread; may block.
 */
void
stack_alloc(thread_t thread)
{
vm_offset_t stack;
spl_t s;
+ int guard_flags;
assert(thread->kernel_stack == 0);
splx(s);
if (stack == 0) {
- if (kernel_memory_allocate(stack_map, &stack, KERNEL_STACK_SIZE, stack_addr_mask, KMA_KOBJECT) != KERN_SUCCESS)
+
+ /*
+ * Request guard pages on either side of the stack. Ask
+ * kernel_memory_allocate() for two extra pages to account
+ * for these.
+ */
+
+ guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
+ if (kernel_memory_allocate(kernel_map, &stack,
+ KERNEL_STACK_SIZE + (2*PAGE_SIZE),
+ stack_addr_mask,
+ KMA_KOBJECT | guard_flags)
+ != KERN_SUCCESS)
panic("stack_alloc: kernel_memory_allocate");
+
+ /*
+ * The stack address that comes back is the address of the lower
+ * guard page. Skip past it to get the actual stack base address.
+ */
+
+ stack += PAGE_SIZE;
}
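/*
 * Resulting layout (illustrative):
 *
 *	[stack - PAGE_SIZE, stack)                                          lower guard page
 *	[stack, stack + KERNEL_STACK_SIZE)                                  usable kernel stack
 *	[stack + KERNEL_STACK_SIZE, stack + KERNEL_STACK_SIZE + PAGE_SIZE)  upper guard page
 *
 * so running off either end of the stack faults on a guard page instead
 * of silently overwriting an adjacent kernel allocation.
 */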
machine_stack_attach(thread, stack);
}

/*
 * stack_free: detach and free the stack for a thread.
 */
void
stack_free(thread_t thread)
{
vm_offset_t stack = machine_stack_detach(thread);
assert(stack);
- if (stack != thread->reserved_stack) {
- struct stack_cache *cache;
- spl_t s;
-
- s = splsched();
- cache = &PROCESSOR_DATA(current_processor(), stack_cache);
- if (cache->count < STACK_CACHE_SIZE) {
- stack_next(stack) = cache->free;
- cache->free = stack;
- cache->count++;
- }
- else {
- stack_lock();
- stack_next(stack) = stack_free_list;
- stack_free_list = stack;
- if (++stack_free_count > stack_free_hiwat)
- stack_free_hiwat = stack_free_count;
- stack_free_delta++;
- stack_unlock();
- }
- splx(s);
- }
+ if (stack != thread->reserved_stack)
+ stack_free_stack(stack);
}
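/*
 * For reference, a minimal sketch of what stack_free_stack() is assumed
 * to do, centralizing the per-processor cache / global free-list logic
 * shown removed above (a sketch under that assumption, not the actual
 * definition):
 */
void
stack_free_stack(vm_offset_t stack)
{
	struct stack_cache	*cache;
	spl_t			s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		/* room in this processor's private cache: keep the stack local */
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		/* cache full: push onto the global free list under the stack lock */
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}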
void
stack_unlock();
splx(s);
- if (vm_map_remove(stack_map, vm_map_trunc_page(stack),
- vm_map_round_page(stack + KERNEL_STACK_SIZE), VM_MAP_REMOVE_KUNWIRE) != KERN_SUCCESS)
+ /*
+ * Get the stack base address, then decrement by one page
+ * to account for the lower guard page. Add two extra pages
+ * to the size to account for the guard pages on both ends
+ * that were originally requested when the stack was allocated
+ * back in stack_alloc().
+ */
+
+ stack = vm_map_trunc_page(stack);
+ stack -= PAGE_SIZE;
+ if (vm_map_remove(
+ kernel_map,
+ stack,
+ stack + KERNEL_STACK_SIZE + (2*PAGE_SIZE),
+ VM_MAP_REMOVE_KUNWIRE)
+ != KERN_SUCCESS)
panic("stack_collect: vm_map_remove");
+ stack = 0;
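/*
 * Illustrative arithmetic check: if B is the stack base handed out by
 * stack_alloc() (allocation start + PAGE_SIZE), the range removed above
 * is [B - PAGE_SIZE, B + KERNEL_STACK_SIZE + PAGE_SIZE), i.e. exactly
 * the lower guard page, the stack, and the upper guard page obtained
 * from kernel_memory_allocate().
 */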
s = splsched();
stack_lock();
vm_size_t maxusage;
vm_offset_t maxstack;
- register thread_t *threads;
+ register thread_t *thread_list;
register thread_t thread;
unsigned int actual; /* this many threads */
vm_size_t size, size_needed;
void *addr;
- if (pset == PROCESSOR_SET_NULL)
+ if (pset == PROCESSOR_SET_NULL || pset != &pset0)
return KERN_INVALID_ARGUMENT;
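/*
 * pset0 is assumed here to be the single system-wide processor set;
 * any other argument is rejected up front.
 */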
- size = 0; addr = 0;
+ size = 0;
+ addr = NULL;
for (;;) {
- pset_lock(pset);
- if (!pset->active) {
- pset_unlock(pset);
- return KERN_INVALID_ARGUMENT;
- }
+ mutex_lock(&tasks_threads_lock);
- actual = pset->thread_count;
+ actual = threads_count;
/* do we have the memory we need? */
if (size_needed <= size)
break;
- /* unlock the pset and allocate more memory */
- pset_unlock(pset);
+ mutex_unlock(&tasks_threads_lock);
if (size != 0)
kfree(addr, size);
return KERN_RESOURCE_SHORTAGE;
}
- /* OK, have memory and the processor_set is locked & active */
- threads = (thread_t *) addr;
- for (i = 0, thread = (thread_t) queue_first(&pset->threads);
- !queue_end(&pset->threads, (queue_entry_t) thread);
- thread = (thread_t) queue_next(&thread->pset_threads)) {
+ /* OK, have memory and the thread list is locked */
+ thread_list = (thread_t *) addr;
+ for (i = 0, thread = (thread_t) queue_first(&threads);
+ !queue_end(&threads, (queue_entry_t) thread);
+ thread = (thread_t) queue_next(&thread->threads)) {
thread_reference_internal(thread);
- threads[i++] = thread;
+ thread_list[i++] = thread;
}
assert(i <= actual);
- /* can unlock processor set now that we have the thread refs */
- pset_unlock(pset);
+ mutex_unlock(&tasks_threads_lock);
/* calculate maxusage and free thread references */
maxusage = 0;
maxstack = 0;
while (i > 0) {
- thread_t threadref = threads[--i];
+ thread_t threadref = thread_list[--i];
if (threadref->kernel_stack != 0)
total++;
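/*
 * With the dedicated stack submap gone, kernel stacks are allocated
 * straight out of kernel_map, so the valid stack-address bounds below
 * are simply the bounds of kernel_map itself.
 */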
vm_offset_t min_valid_stack_address(void)
{
- return vm_map_min(stack_map);
+ return vm_map_min(kernel_map);
}
vm_offset_t max_valid_stack_address(void)
{
- return vm_map_max(stack_map);
+ return vm_map_max(kernel_map);
}