apple/xnu.git: osfmk/i386/cpu_threads.c
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
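/*
 * cpu_threads.c
 *
 * Maintains the mapping between logical processors (hardware threads)
 * and physical cores on i386: detects Intel Hyper-Threading, allocates
 * the per-core cpu_core_t structures, and keeps the logical/physical
 * cpu counts in machine_info up to date as threads start and halt.
 */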
#include <vm/vm_kern.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/perfmon.h>

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state; halting is the default behavior.
 * See machine_idle() for use.
 */
int idlehalt = 1;

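/*
 * Report whether the processor package is hyperthreaded: the CPUID HTT
 * feature bit is set and more logical processors than cores are
 * reported per package.
 */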
static boolean_t
cpu_is_hyperthreaded(void)
{
        if (cpuid_features() & CPUID_FEATURE_HTT)
                return (cpuid_info()->cpuid_logical_per_package /
                        cpuid_info()->cpuid_cores_per_package) > 1;
        else
                return FALSE;
}

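/*
 * Return the cpu_core_t shared by all threads in this cpu's core,
 * allocating and zeroing it if this is the first thread of the core to
 * arrive. machine_info.physical_cpu_max is bumped when a new core
 * structure is allocated; machine_info.logical_cpu_max is bumped for
 * every caller.
 */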
void *
cpu_thread_alloc(int cpu)
{
        int             core_base_cpu;
        int             ret;
        cpu_core_t      *core;

        /*
         * Assume that all cpus have the same features.
         */
        if (cpu_is_hyperthreaded()) {
                /*
                 * Get the cpu number of the base thread in the core.
                 */
                core_base_cpu = cpu_to_core_cpu(cpu);
                cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
        } else {
                core_base_cpu = cpu;
                cpu_datap(cpu)->cpu_threadtype = CPU_THREADTYPE_NONE;
        }

        core = (cpu_core_t *) cpu_to_core(core_base_cpu);
        if (core == NULL) {
                ret = kmem_alloc(kernel_map,
                                 (void *) &core, sizeof(cpu_core_t));
                if (ret != KERN_SUCCESS)
                        panic("cpu_thread_alloc() kmem_alloc ret=%d\n", ret);
                bzero((void *) core, sizeof(cpu_core_t));

                core->base_cpu = core_base_cpu;

                atomic_incl((long *) &machine_info.physical_cpu_max, 1);

                /* Allocate performance counter data area (if available) */
                core->pmc = pmc_alloc();
        }
        atomic_incl((long *) &machine_info.logical_cpu_max, 1);

        return (void *) core;
}

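/*
 * Per-thread initialization, called as each cpu starts up: the boot cpu
 * allocates its own core structure here, then every cpu joins its core
 * and updates the active thread and cpu counts in machine_info.
 */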
void
cpu_thread_init(void)
{
        int             my_cpu = get_cpu_number();
        cpu_core_t      *my_core;

        /*
         * If we're the boot processor we allocate the core structure here.
         * Otherwise the core has already been allocated (by the boot cpu).
         */
        if (my_cpu == master_cpu)
                cpu_to_core(master_cpu) = cpu_thread_alloc(master_cpu);

        my_core = cpu_core();
        if (my_core == NULL)
                panic("cpu_thread_init() no core allocated for cpu %d", my_cpu);

        atomic_incl((long *) &my_core->active_threads, 1);
        atomic_incl((long *) &machine_info.logical_cpu, 1);
        /* Note: cpus are started serially so this isn't as racy as it looks */
        if (my_core->num_threads == 0)
                atomic_incl((long *) &machine_info.physical_cpu, 1);
        atomic_incl((long *) &my_core->num_threads, 1);
}

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
        cpu_core_t      *my_core = cpu_core();

        atomic_decl((long *) &machine_info.logical_cpu, 1);
        atomic_decl((long *) &my_core->active_threads, 1);
        if (atomic_decl_and_test((long *) &my_core->num_threads, 1))
                atomic_decl((long *) &machine_info.physical_cpu, 1);

        cpu_halt();
}