#include <vm/vm_kern.h>
#include <vm/vm_map.h>
-#include <i386/lock.h>
+#include <i386/bit_routines.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
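/*
* VM_KERN_MEMORY_CPU tags the per-cpu allocations below so they are
* attributed to a named site in the kernel's memory accounting:
* kmem_alloc() takes the tag as an explicit argument, while
* vm_allocate() carries it in the flags word via VM_MAKE_TAG().
*/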
/*
* Allocate per-cpu data:
*/
- ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t));
+ ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
printf("cpu_data_alloc() failed, ret=%d\n", ret);
goto abort;
}
/*
* Allocate interrupt stack:
*/
ret = kmem_alloc(kernel_map,
(vm_offset_t *) &cdp->cpu_int_stack_top,
- INTSTACK_SIZE);
+ INTSTACK_SIZE, VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
goto abort;
}
/*
* Allocate descriptor table:
*/
ret = kmem_alloc(kernel_map,
(vm_offset_t *) &cdp->cpu_desc_tablep,
- sizeof(cpu_desc_table64_t));
+ sizeof(cpu_desc_table64_t),
+ VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
goto abort;
}
/*
* Allocate LDT:
*/
ret = kmem_alloc(kernel_map,
(vm_offset_t *) &cdp->cpu_ldtp,
- sizeof(struct real_descriptor) * LDTSZ);
+ sizeof(struct real_descriptor) * LDTSZ,
+ VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
printf("cpu_data_alloc() ldt failed, ret=%d\n", ret);
goto abort;
}
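/*
* Every failure path funnels to the abort label (not shown), which
* is expected to free whatever allocations already succeeded.
*/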
real_ncpus++;
simple_unlock(&ncpus_lock);
+ /*
+ * Before this cpu has been assigned a real thread context,
+ * we give it a fake, unique, non-zero thread id which the locking
+ * primitives use as their lock value.
+ * Note that this does not apply to the boot processor, cpu 0, which
+ * transitions to a thread context well before other processors are
+ * started.
+ */
+ cdp->cpu_active_thread = (thread_t) (uintptr_t) cdp->cpu_number;
+
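/*
* Illustrative sketch (simplified, not from this file): a lock that
* records its holder as current_thread() needs that value to be
* non-zero and unique per cpu even before threads exist, e.g.
*
*	lck->owner = current_thread();	-- must never be 0 or shared
*
* hence each secondary cpu is seeded with its cpu number above.
*/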
cdp->cpu_nanotime = &pal_rtc_nanotime_info;
kprintf("cpu_data_alloc(%d) %p desc_table: %p "
if (vm_allocate(kernel_map, &vaddr,
(NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE,
- VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
panic("cpu_userwindow_init: "
"couldn't allocate user map window");
if (phys_window == 0) {
if (vm_allocate(kernel_map, &phys_window,
- PAGE_SIZE, VM_FLAGS_ANYWHERE)
+ PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU))
!= KERN_SUCCESS)
panic("cpu_physwindow_init: "
"couldn't allocate phys map window");
cpu_data_t *cdp;
boolean_t istate;
- ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE);
+ ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE, VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
panic("cpu_data_realloc() stack alloc, ret=%d\n", ret);
}
bzero((void*) istk, INTSTACK_SIZE);
istk += INTSTACK_SIZE;
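/* x86 stacks grow down, so the usable top is base + INTSTACK_SIZE. */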
- ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t));
+ ret = kmem_alloc(kernel_map, (vm_offset_t *) &cdp, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
panic("cpu_data_realloc() cpu data alloc, ret=%d\n", ret);
}
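/*
* cpu_data_realloc() rebuilds the boot cpu's data and stacks from
* kmem_alloc()ed memory (presumably replacing boot-time static
* storage); there is no fallback, so failures panic.
*/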
timer_call_queue_init(&cdp->rtclock_timer.queue);
/* Allocate the separate fault stack */
- ret = kmem_alloc(kernel_map, &fstk, PAGE_SIZE);
+ ret = kmem_alloc(kernel_map, &fstk, PAGE_SIZE, VM_KERN_MEMORY_CPU);
if (ret != KERN_SUCCESS) {
panic("cpu_data_realloc() fault stack alloc, ret=%d\n", ret);
}
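/*
* A dedicated one-page fault stack, kept separate from the interrupt
* stack so a fault taken with a corrupt stack can still be handled
* (the usual motivation for a dedicated fault stack).
*/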