static vm_offset_t stack_addr_mask;
-unsigned int kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
-vm_offset_t kernel_stack_size = KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_mask = -KERNEL_STACK_SIZE;
-vm_offset_t kernel_stack_depth_max = 0;
+unsigned int kernel_stack_pages;
+vm_offset_t kernel_stack_size;
+vm_offset_t kernel_stack_mask;
+vm_offset_t kernel_stack_depth_max;
static inline void
STACK_ZINFO_PALLOC(thread_t thread)
{
- task_t task;
- zinfo_usage_t zinfo;
-
ledger_credit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
- if (stack_fake_zone_index != -1 &&
- (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(kernel_stack_size,
- (int64_t *)&zinfo[stack_fake_zone_index].alloc);
}
static inline void
STACK_ZINFO_PFREE(thread_t thread)
{
- task_t task;
- zinfo_usage_t zinfo;
-
ledger_debit(thread->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
- if (stack_fake_zone_index != -1 &&
- (task = thread->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(kernel_stack_size,
- (int64_t *)&zinfo[stack_fake_zone_index].free);
}
static inline void
STACK_ZINFO_HANDOFF(thread_t from, thread_t to)
{
ledger_debit(from->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
ledger_credit(to->t_ledger, task_ledgers.tkm_private, kernel_stack_size);
-
- if (stack_fake_zone_index != -1) {
- task_t task;
- zinfo_usage_t zinfo;
-
- if ((task = from->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(kernel_stack_size,
- (int64_t *)&zinfo[stack_fake_zone_index].free);
-
- if ((task = to->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(kernel_stack_size,
- (int64_t *)&zinfo[stack_fake_zone_index].alloc);
- }
}
/*
 *	stack_init:  initialize the stack system.
 */
void
stack_init(void)
{
simple_lock_init(&stack_lock_data, 0);
+ kernel_stack_pages = KERNEL_STACK_SIZE / PAGE_SIZE;
+ kernel_stack_size = KERNEL_STACK_SIZE;
+ kernel_stack_mask = -KERNEL_STACK_SIZE;
+ kernel_stack_depth_max = 0;
+
if (PE_parse_boot_argn("kernel_stack_pages",
&kernel_stack_pages,
sizeof (kernel_stack_pages))) {
if (kernel_memory_allocate(kernel_map, &stack,
kernel_stack_size + (2*PAGE_SIZE),
stack_addr_mask,
- KMA_KSTACK | KMA_KOBJECT | guard_flags)
+ KMA_KSTACK | KMA_KOBJECT | guard_flags,
+ VM_KERN_MEMORY_STACK)
!= KERN_SUCCESS)
panic("stack_alloc: kernel_memory_allocate");
* back in stack_alloc().
*/
- stack = (vm_offset_t)vm_map_trunc_page(stack);
+ stack = (vm_offset_t)vm_map_trunc_page(
+ stack,
+ VM_MAP_PAGE_MASK(kernel_map));
stack -= PAGE_SIZE;
if (vm_map_remove(
kernel_map,
vm_size_t maxusage;
vm_offset_t maxstack;
- register thread_t *thread_list;
- register thread_t thread;
+ thread_t *thread_list;
+ thread_t thread;
unsigned int actual; /* this many things */
unsigned int i;
/* OK, have memory and list is locked */
thread_list = (thread_t *) addr;
- for (i = 0, thread = (thread_t) queue_first(&threads);
+ for (i = 0, thread = (thread_t)(void *) queue_first(&threads);
!queue_end(&threads, (queue_entry_t) thread);
- thread = (thread_t) queue_next(&thread->threads)) {
+ thread = (thread_t)(void *) queue_next(&thread->threads)) {
thread_reference_internal(thread);
thread_list[i++] = thread;
}