/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/perfmon.h>

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state.  Halting idle threads is the default behavior.
 * See machine_idle() for use.
 */
int idlehalt = 1;
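/*
 * Illustrative sketch only (machine_idle() lives in another file): a
 * consumer of idlehalt would typically gate the halt instruction on it,
 * along the lines of
 *
 *        if (idlehalt)
 *                __asm__ volatile ("sti; hlt");  (sleep until the next interrupt)
 *        else
 *                __asm__ volatile ("sti");       (leave interrupts enabled and return)
 */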

static boolean_t
cpu_is_hyperthreaded(void)
{
        if (cpuid_features() & CPUID_FEATURE_HTT)
                return (cpuid_info()->cpuid_logical_per_package /
                        cpuid_info()->cpuid_cores_per_package) > 1;
        else
                return FALSE;
}
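
/*
 * Worked example (illustrative): a Hyper-Threading processor reporting
 * 2 logical processors per package and 1 core per package yields a
 * ratio of 2, so this routine returns TRUE; a part without HTT (or with
 * it disabled, so logical == cores) returns FALSE.
 */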

void *
cpu_thread_alloc(int cpu)
{
        int core_base_cpu;
        int ret;
        cpu_core_t *core;

        /*
         * Assume that all cpus have the same features.
         */
        if (cpu_is_hyperthreaded()) {
                /*
                 * Get the cpu number of the base thread in the core.
                 */
                core_base_cpu = cpu_to_core_cpu(cpu);
                cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
        } else {
                core_base_cpu = cpu;
                cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_NONE;
        }

        core = (cpu_core_t *) cpu_to_core(core_base_cpu);
        if (core == NULL) {
                ret = kmem_alloc(kernel_map,
                                 (void *) &core, sizeof(cpu_core_t));
                if (ret != KERN_SUCCESS)
                        panic("cpu_thread_alloc() kmem_alloc ret=%d\n", ret);
                bzero((void *) core, sizeof(cpu_core_t));

                core->base_cpu = core_base_cpu;

                atomic_incl((long *) &machine_info.physical_cpu_max, 1);

                /* Allocate performance counter data area (if available) */
                core->pmc = pmc_alloc();
        }
        atomic_incl((long *) &machine_info.logical_cpu_max, 1);

        return (void *) core;
}
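
/*
 * Usage sketch (illustrative; it mirrors the call made for the boot cpu
 * in cpu_thread_init() below): a cpu bringup path records the returned
 * pointer so that both hyperthreads of a core share one cpu_core_t:
 *
 *        cpu_to_core(new_cpu) = cpu_thread_alloc(new_cpu);
 */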

void
cpu_thread_init(void)
{
        int my_cpu = get_cpu_number();
        cpu_core_t *my_core;

        /*
         * If we're the boot processor we allocate the core structure here.
         * Otherwise the core has already been allocated (by the boot cpu).
         */
        if (my_cpu == master_cpu)
                cpu_to_core(master_cpu) = cpu_thread_alloc(master_cpu);

        my_core = cpu_core();
        if (my_core == NULL)
                panic("cpu_thread_init() no core allocated for cpu %d", my_cpu);

        atomic_incl((long *) &my_core->active_threads, 1);
        atomic_incl((long *) &machine_info.logical_cpu, 1);
        /* Note: cpus are started serially so this isn't as racy as it looks */
        if (my_core->num_threads == 0)
                atomic_incl((long *) &machine_info.physical_cpu, 1);
        atomic_incl((long *) &my_core->num_threads, 1);
}
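
/*
 * Accounting example (illustrative): on a package with 2 cores and 2
 * hyperthreads per core, once every thread has run cpu_thread_init()
 * machine_info reports logical_cpu == 4 (one increment per thread) and
 * physical_cpu == 2, since only the first thread to start in each core
 * sees num_threads == 0 and takes the physical_cpu increment.
 */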

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
        cpu_core_t *my_core = cpu_core();

        atomic_decl((long *) &machine_info.logical_cpu, 1);
        atomic_decl((long *) &my_core->active_threads, 1);
        if (atomic_decl_and_test((long *) &my_core->num_threads, 1))
                atomic_decl((long *) &machine_info.physical_cpu, 1);

        cpu_halt();
}
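
/*
 * Note (illustrative): this reverses the accounting done in
 * cpu_thread_init().  atomic_decl_and_test() returns TRUE only for the
 * last thread leaving the core, so physical_cpu is decremented exactly
 * once per core before the processor is parked for good in cpu_halt().
 */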