X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ff6e181ae92fc6f1e89841290f461d1f2f9badd9..c0fea4742e91338fffdcf79f86a7c1d5e2b97eb1:/osfmk/i386/cpu_threads.c

diff --git a/osfmk/i386/cpu_threads.c b/osfmk/i386/cpu_threads.c
index 6b064d5ac..0a4c3d5e2 100644
--- a/osfmk/i386/cpu_threads.c
+++ b/osfmk/i386/cpu_threads.c
@@ -3,20 +3,19 @@
  *
  * @APPLE_LICENSE_HEADER_START@
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  *
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -26,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * Kernel parameter determining whether threads are halted unconditionally
@@ -34,63 +34,81 @@
  */
 int idlehalt = 1;
 
-void
-cpu_thread_init(void)
+
+static boolean_t
+cpu_is_hyperthreaded(void)
 {
-	int		my_cpu = get_cpu_number();
-	int		my_core_base_cpu;
-	int		ret;
-	cpu_core_t	*my_core;
+	if (cpuid_features() & CPUID_FEATURE_HTT)
+		return (cpuid_info()->cpuid_logical_per_package /
+			cpuid_info()->cpuid_cores_per_package) > 1;
+	else
+		return FALSE;
+}
 
-	/* Have we initialized already for this cpu? */
-	if (cpu_core())
-		return;
+void *
+cpu_thread_alloc(int cpu)
+{
+	int		core_base_cpu;
+	int		ret;
+	cpu_core_t	*core;
 
-	if (cpuid_features() & CPUID_FEATURE_HTT) {
+	/*
+	 * Assume that all cpus have the same features.
+	 */
+	if (cpu_is_hyperthreaded()) {
 		/*
 		 * Get the cpu number of the base thread in the core.
 		 */
-		my_core_base_cpu = cpu_to_core_cpu(my_cpu);
-		current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
+		core_base_cpu = cpu_to_core_cpu(cpu);
+		cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
 	} else {
-		my_core_base_cpu = my_cpu;
-		current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_NONE;
+		core_base_cpu = cpu;
+		cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_NONE;
 	}
 
-	/*
-	 * Allocate the base cpu_core struct if none exists.
-	 * Since we could be racing with other threads in the same core,
-	 * this needs care without using locks. We allocate a new core
-	 * structure and assign it atomically, freeing it if we lost the race.
-	 */
-	my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
-	if (my_core == NULL) {
-		cpu_core_t	*new_core;
-
+	core = (cpu_core_t *) cpu_to_core(core_base_cpu);
+	if (core == NULL) {
 		ret = kmem_alloc(kernel_map,
-				 (void *) &new_core, sizeof(cpu_core_t));
+				 (void *) &core, sizeof(cpu_core_t));
 		if (ret != KERN_SUCCESS)
-			panic("cpu_thread_init() kmem_alloc ret=%d\n", ret);
-		bzero((void *) new_core, sizeof(cpu_core_t));
-		new_core->base_cpu = my_core_base_cpu;
-		if (atomic_cmpxchg((uint32_t *) &cpu_to_core(my_core_base_cpu),
-				   0, (uint32_t) new_core)) {
-			atomic_incl((long *) &machine_info.physical_cpu, 1);
-			atomic_incl((long *) &machine_info.physical_cpu_max, 1);
-		} else {
-			kmem_free(kernel_map,
-				  (vm_offset_t)new_core, sizeof(cpu_core_t));
-		}
-		my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
+			panic("cpu_thread_alloc() kmem_alloc ret=%d\n", ret);
+		bzero((void *) core, sizeof(cpu_core_t));
+
+		core->base_cpu = core_base_cpu;
+
+		atomic_incl((long *) &machine_info.physical_cpu_max, 1);
+
+		/* Allocate performance counter data area (if available) */
+		core->pmc = pmc_alloc();
 	}
+	atomic_incl((long *) &machine_info.logical_cpu_max, 1);
 
-	cpu_to_core(my_cpu) = (struct cpu_core *) my_core;
+	return (void *) core;
+}
+
+void
+cpu_thread_init(void)
+{
+	int		my_cpu = get_cpu_number();
+	cpu_core_t	*my_core;
+
+	/*
+	 * If we're the boot processor we allocate the core structure here.
+	 * Otherwise the core has already been allocated (by the boot cpu).
+	 */
+	if (my_cpu == master_cpu)
+		cpu_to_core(master_cpu) = cpu_thread_alloc(master_cpu);
+
+	my_core = cpu_core();
+	if (my_core == NULL)
+		panic("cpu_thread_init() no core allocated for cpu %d", my_cpu);
 
 	atomic_incl((long *) &my_core->active_threads, 1);
-	atomic_incl((long *) &my_core->num_threads, 1);
 	atomic_incl((long *) &machine_info.logical_cpu, 1);
-	atomic_incl((long *) &machine_info.logical_cpu_max, 1);
-	
+	/* Note: cpus are started serially so this isn't as racey as it looks */
+	if (my_core->num_threads == 0)
+		atomic_incl((long *) &machine_info.physical_cpu, 1);
+	atomic_incl((long *) &my_core->num_threads, 1);
 }
 
 /*
@@ -102,10 +120,10 @@ cpu_thread_halt(void)
 {
	cpu_core_t	*my_core = cpu_core();
 
-	/* Note: don't ever decrement the number of physical processors */
-	atomic_decl((long *) &my_core->active_threads, 1);
-	atomic_decl((long *) &my_core->num_threads, 1);
 	atomic_decl((long *) &machine_info.logical_cpu, 1);
+	atomic_decl((long *) &my_core->active_threads, 1);
+	if (atomic_decl_and_test((long *) &my_core->num_threads, 1))
+		atomic_decl((long *) &machine_info.physical_cpu, 1);
 
 	cpu_halt();
 }
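
The comment removed by this change describes the lock-free publication scheme the old cpu_thread_init() relied on: allocate a candidate cpu_core_t, try to install it into the per-core slot with a single compare-and-swap, and free it again if another thread in the same core won the race. The following is a minimal user-space sketch of that pattern only, not the kernel code: it uses C11 <stdatomic.h> and calloc()/free() in place of XNU's atomic_cmpxchg() and kmem_alloc()/kmem_free(), and core_t, core_slot and core_get_or_alloc() are made-up stand-ins rather than real xnu symbols.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct core {
	int	base_cpu;
} core_t;

/* stand-in for the per-core slot the old code reached via cpu_to_core() */
static _Atomic(core_t *) core_slot;

static core_t *
core_get_or_alloc(int base_cpu)
{
	core_t	*cur = atomic_load(&core_slot);

	if (cur != NULL)
		return cur;			/* already published by someone */

	core_t	*candidate = calloc(1, sizeof(*candidate));
	if (candidate == NULL)
		return NULL;			/* the kernel code panics here instead */
	candidate->base_cpu = base_cpu;

	core_t	*expected = NULL;
	if (atomic_compare_exchange_strong(&core_slot, &expected, candidate))
		return candidate;		/* we won: our structure is now visible */

	free(candidate);			/* we lost the race: discard ours ... */
	return expected;			/* ... and use the winner's structure */
}

The new cpu_thread_alloc() no longer needs this dance: only the boot cpu allocates core structures, and secondary cpus are brought up one at a time, so the slot can be filled with a plain assignment.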
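The accounting that replaces it in cpu_thread_init()/cpu_thread_halt() is a first-in/last-out count: the first thread to come up in a core bumps machine_info.physical_cpu, and only the last thread to halt drops it again, via atomic_decl_and_test(). Below is a rough equivalent using C11 atomics rather than the kernel's atomic_incl()/atomic_decl_and_test(); the struct and function names (machine_counts, core_counts, thread_up, thread_halt) are illustrative, not kernel symbols.

#include <stdatomic.h>

struct machine_counts {
	atomic_long	logical_cpu;
	atomic_long	physical_cpu;
};

struct core_counts {
	atomic_long	active_threads;
	atomic_long	num_threads;
};

/* cpus are started one at a time, so the zero test below is not racy */
static void
thread_up(struct machine_counts *m, struct core_counts *c)
{
	atomic_fetch_add(&c->active_threads, 1);
	atomic_fetch_add(&m->logical_cpu, 1);
	if (atomic_load(&c->num_threads) == 0)
		atomic_fetch_add(&m->physical_cpu, 1);	/* first thread in this core */
	atomic_fetch_add(&c->num_threads, 1);
}

static void
thread_halt(struct machine_counts *m, struct core_counts *c)
{
	atomic_fetch_sub(&m->logical_cpu, 1);
	atomic_fetch_sub(&c->active_threads, 1);
	/* fetch_sub returns the old value: old == 1 means we were the last thread */
	if (atomic_fetch_sub(&c->num_threads, 1) == 1)
		atomic_fetch_sub(&m->physical_cpu, 1);
}

The unsynchronized zero test in thread_up() is why the diff carries the note that cpus are started serially; the halt path can genuinely race, which is why it folds the decrement and the zero test into the single atomic_decl_and_test() operation.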