-#ifndef MACHINE_STACK
- simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); /* Initialize the stack lock */
-
- if (KERNEL_STACK_SIZE < round_page_32(KERNEL_STACK_SIZE)) { /* Kernel stacks must be multiples of pages */
- panic("thread_init: kernel stack size (%08X) must be a multiple of page size (%08X)\n",
- KERNEL_STACK_SIZE, PAGE_SIZE);
- }
-
- for(stack_alloc_bndry = PAGE_SIZE; stack_alloc_bndry <= KERNEL_STACK_SIZE; stack_alloc_bndry <<= 1); /* Find next power of 2 above stack size */
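The empty-bodied loop above rounds the stack size up to the next power of two, so every kernel stack occupies a fixed, aligned slot in the stack submap. A minimal standalone sketch of the same rounding, assuming illustrative values of a 4 KB page and a 16 KB kernel stack (both hypothetical here):

    #include <stdio.h>

    #define PAGE_SIZE          4096        /* assumed for illustration */
    #define KERNEL_STACK_SIZE  (4 * 4096)  /* assumed for illustration */

    int main(void)
    {
        unsigned long stack_alloc_bndry;

        /* Double from one page until the boundary exceeds the stack size.
           Note the loop stops strictly above the size: 16 KB yields 32 KB. */
        for (stack_alloc_bndry = PAGE_SIZE;
             stack_alloc_bndry <= KERNEL_STACK_SIZE;
             stack_alloc_bndry <<= 1)
            ;

        printf("stack_alloc_bndry = %lu\n", stack_alloc_bndry);
        return 0;
    }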
-
- ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
- &stack,
- (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
- FALSE, /* Say not pageable so that it is wired */
- TRUE, /* Allocate from anywhere */
- &stack_map); /* Allocate a submap */
-
- if(ret != KERN_SUCCESS) { /* Did we get one? */
- panic("thread_init: kmem_suballoc for stacks failed - ret = %d\n", ret); /* Die */
- }
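For scale, the size handed to kmem_suballoc above reserves one power-of-two slot for each of up to 2*THREAD_MAX + 64 stacks. A quick arithmetic sketch, assuming a 32 KB boundary and a THREAD_MAX of 1024 (both hypothetical values, since THREAD_MAX is configuration-dependent):

    #include <stdio.h>

    #define STACK_ALLOC_BNDRY  (32 * 1024)  /* assumed for illustration */
    #define THREAD_MAX         1024         /* assumed for illustration */

    int main(void)
    {
        /* One aligned slot per stack, with 64 slots of headroom. */
        unsigned long reserve =
            (unsigned long)STACK_ALLOC_BNDRY * (2 * THREAD_MAX + 64);

        printf("submap reservation = %lu bytes (%lu MB)\n",
               reserve, reserve >> 20);
        return 0;
    }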
- stack = vm_map_min(stack_map); /* Make sure we skip the first hunk */
- ret = vm_map_enter(stack_map, &stack, PAGE_SIZE, 0, /* Make sure there is nothing at the start */
- 0, /* Force it at start */
- VM_OBJECT_NULL, 0, /* No object yet */
- FALSE, /* No copy */
- VM_PROT_NONE, /* Allow no access */
- VM_PROT_NONE, /* Allow no access */
- VM_INHERIT_DEFAULT); /* Just be normal */
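The entry created here is a placeholder at the very start of the submap, mapped with no access rights so nothing is ever allocated at vm_map_min(stack_map). For intuition only, a rough userspace analogy that reserves an inaccessible page with mmap and PROT_NONE (ordinary POSIX calls, not the Mach VM interfaces used above):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* Claim one page of address space that faults on any access,
           similar in spirit to the VM_PROT_NONE entry above. */
        void *guard = mmap(NULL, 4096, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (guard == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("reserved no-access page at %p\n", guard);
        return 0;
    }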
-
- if(ret != KERN_SUCCESS) { /* Did it work? */
- panic("thread_init: dummy alignment allocation failed; ret = %d\n", ret);