X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/c0fea4742e91338fffdcf79f86a7c1d5e2b97eb1..8ad349bb6ed4a0be06e34c92be0d98b92e078db4:/osfmk/i386/cpu_threads.c

diff --git a/osfmk/i386/cpu_threads.c b/osfmk/i386/cpu_threads.c
index 0a4c3d5e2..c2249c521 100644
--- a/osfmk/i386/cpu_threads.c
+++ b/osfmk/i386/cpu_threads.c
@@ -1,23 +1,31 @@
 /*
  * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 #include
 #include
@@ -25,7 +33,6 @@
 #include
 #include
 #include
-#include
 
 /*
  * Kernel parameter determining whether threads are halted unconditionally
@@ -34,81 +41,63 @@
  */
 int idlehalt = 1;
 
-
-static boolean_t
-cpu_is_hyperthreaded(void)
-{
-	if (cpuid_features() & CPUID_FEATURE_HTT)
-		return (cpuid_info()->cpuid_logical_per_package /
-			cpuid_info()->cpuid_cores_per_package) > 1;
-	else
-		return FALSE;
-}
-
-void *
-cpu_thread_alloc(int cpu)
+void
+cpu_thread_init(void)
 {
-	int		core_base_cpu;
+	int		my_cpu = get_cpu_number();
+	int		my_core_base_cpu;
 	int		ret;
-	cpu_core_t	*core;
+	cpu_core_t	*my_core;
 
-	/*
-	 * Assume that all cpus have the same features.
-	 */
-	if (cpu_is_hyperthreaded()) {
+	/* Have we initialized already for this cpu? */
+	if (cpu_core())
+		return;
+
+	if (cpuid_features() & CPUID_FEATURE_HTT) {
 		/*
 		 * Get the cpu number of the base thread in the core.
 		 */
-		core_base_cpu = cpu_to_core_cpu(cpu);
-		cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
+		my_core_base_cpu = cpu_to_core_cpu(my_cpu);
+		current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
 	} else {
-		core_base_cpu = cpu;
-		cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_NONE;
+		my_core_base_cpu = my_cpu;
+		current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_NONE;
 	}
 
-	core = (cpu_core_t *) cpu_to_core(core_base_cpu);
-	if (core == NULL) {
+	/*
+	 * Allocate the base cpu_core struct if none exists.
+	 * Since we could be racing with other threads in the same core,
+	 * this needs care without using locks. We allocate a new core
+	 * structure and assign it atomically, freeing it if we lost the race.
+	 */
+	my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
+	if (my_core == NULL) {
+		cpu_core_t	*new_core;
+
 		ret = kmem_alloc(kernel_map,
-				 (void *) &core, sizeof(cpu_core_t));
+				 (void *) &new_core, sizeof(cpu_core_t));
 		if (ret != KERN_SUCCESS)
-			panic("cpu_thread_alloc() kmem_alloc ret=%d\n", ret);
-		bzero((void *) core, sizeof(cpu_core_t));
-
-		core->base_cpu = core_base_cpu;
-
-		atomic_incl((long *) &machine_info.physical_cpu_max, 1);
-
-		/* Allocate performance counter data area (if available) */
-		core->pmc = pmc_alloc();
+			panic("cpu_thread_init() kmem_alloc ret=%d\n", ret);
+		bzero((void *) new_core, sizeof(cpu_core_t));
+		new_core->base_cpu = my_core_base_cpu;
+		if (atomic_cmpxchg((uint32_t *) &cpu_to_core(my_core_base_cpu),
+				   0, (uint32_t) new_core)) {
+			atomic_incl((long *) &machine_info.physical_cpu, 1);
+			atomic_incl((long *) &machine_info.physical_cpu_max, 1);
+		} else {
+			kmem_free(kernel_map,
+				  (vm_offset_t)new_core, sizeof(cpu_core_t));
+		}
+		my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
 	}
 
-	atomic_incl((long *) &machine_info.logical_cpu_max, 1);
-
-	return (void *) core;
-}
-
-void
-cpu_thread_init(void)
-{
-	int		my_cpu = get_cpu_number();
-	cpu_core_t	*my_core;
-
-	/*
-	 * If we're the boot processor we allocate the core structure here.
-	 * Otherwise the core has already been allocated (by the boot cpu).
-	 */
-	if (my_cpu == master_cpu)
-		cpu_to_core(master_cpu) = cpu_thread_alloc(master_cpu);
-
-	my_core = cpu_core();
-	if (my_core == NULL)
-		panic("cpu_thread_init() no core allocated for cpu %d", my_cpu);
+	cpu_to_core(my_cpu) = (struct cpu_core *) my_core;
 
 	atomic_incl((long *) &my_core->active_threads, 1);
-	atomic_incl((long *) &machine_info.logical_cpu, 1);
-	/* Note: cpus are started serially so this isn't as racey as it looks */
-	if (my_core->num_threads == 0)
-		atomic_incl((long *) &machine_info.physical_cpu, 1);
 	atomic_incl((long *) &my_core->num_threads, 1);
+	atomic_incl((long *) &machine_info.logical_cpu, 1);
+	atomic_incl((long *) &machine_info.logical_cpu_max, 1);
+
 }
 
 /*
@@ -120,10 +109,10 @@ cpu_thread_halt(void)
 {
 	cpu_core_t	*my_core = cpu_core();
 
-	atomic_decl((long *) &machine_info.logical_cpu, 1);
+	/* Note: don't ever decrement the number of physical processors */
 	atomic_decl((long *) &my_core->active_threads, 1);
-	if (atomic_decl_and_test((long *) &my_core->num_threads, 1))
-		atomic_decl((long *) &machine_info.physical_cpu, 1);
+	atomic_decl((long *) &my_core->num_threads, 1);
+	atomic_decl((long *) &machine_info.logical_cpu, 1);
 
 	cpu_halt();
 }
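
The heart of this change is cpu_thread_init()'s switch from boot-cpu-serialized
allocation to a lock-free allocate-and-publish scheme: each thread that finds no
cpu_core for its core allocates a candidate, publishes it with a single
compare-and-swap, and frees the candidate if a sibling hyperthread won the race;
only the CAS winner bumps the physical_cpu counters, so that accounting happens
exactly once per core. What follows is a minimal user-space sketch of that
pattern, not XNU code: it substitutes C11 <stdatomic.h> and malloc()/free() for
the kernel's atomic_cmpxchg() and kmem_alloc()/kmem_free(), a plain core_table[]
array for the cpu_to_core() per-cpu accessor, and core_for() is a name invented
for this illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CORES	8

typedef struct cpu_core {
	int	base_cpu;	/* cpu number of the core's base thread */
	long	num_threads;	/* threads that have checked in */
} cpu_core_t;

/* One published pointer per core; NULL means "not yet allocated". */
static _Atomic(cpu_core_t *) core_table[MAX_CORES];

/*
 * Return the cpu_core for base_cpu, allocating it if none exists.
 * Racing threads each allocate a candidate, try to publish it with one
 * compare-and-swap, and free the candidate on a lost race -- no locks.
 */
static cpu_core_t *
core_for(int base_cpu)
{
	cpu_core_t *core = atomic_load(&core_table[base_cpu]);

	if (core == NULL) {
		cpu_core_t *new_core = calloc(1, sizeof(*new_core));
		cpu_core_t *expected = NULL;

		if (new_core == NULL)
			abort();	/* the kernel version panics here */
		new_core->base_cpu = base_cpu;

		if (!atomic_compare_exchange_strong(&core_table[base_cpu],
						    &expected, new_core)) {
			/* Lost the race: a sibling published first. */
			free(new_core);
		}
		/* Reload: this is the winner's structure either way. */
		core = atomic_load(&core_table[base_cpu]);
	}
	return core;
}

int
main(void)
{
	/* Two hyperthreads of core 0 resolve to one shared structure. */
	cpu_core_t *a = core_for(0);
	cpu_core_t *b = core_for(0);

	printf("same core struct: %s\n", (a == b) ? "yes" : "no");
	return 0;
}

The price of losing the race is one wasted allocation, which is why the loser
frees its candidate rather than retrying: the winner's structure is the one
published, and every thread of the core ends up pointing at that same cpu_core.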