/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/perfmon.h>
#include <i386/pmCPU.h>

#define bitmask(h,l)    ((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
#define bitfield(x,h,l) (((x) & bitmask(h,l)) >> l)

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state.  This is the default behavior.
 * See machine_idle() for use.
 */
int idlehalt = 1;

x86_pkg_t               *x86_pkgs       = NULL;
uint32_t                num_packages    = 0;
uint32_t                num_Lx_caches[MAX_CACHE_DEPTH] = { 0 };

static x86_pkg_t        *free_pkgs      = NULL;
static x86_core_t       *free_cores     = NULL;

static x86_cpu_cache_t  *x86_caches     = NULL;
static uint32_t         num_caches      = 0;

decl_simple_lock_data(, x86_topo_lock);

static x86_cpu_cache_t *
x86_cache_alloc(void)
{
    x86_cpu_cache_t *cache;
    int             i;

    if (x86_caches == NULL) {
        cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *)));
        if (cache == NULL)
            return(NULL);
    } else {
        cache = x86_caches;
        x86_caches = cache->next;
        cache->next = NULL;
    }

    bzero(cache, sizeof(x86_cpu_cache_t));
    cache->next = NULL;
    cache->maxcpus = MAX_CPUS;
    for (i = 0; i < cache->maxcpus; i += 1) {
        cache->cpus[i] = NULL;
    }

    num_caches += 1;

    return(cache);
}

static void
x86_cache_free(x86_cpu_cache_t *cache)
{
    num_caches -= 1;
    if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH)
        num_Lx_caches[cache->level - 1] -= 1;
    cache->next = x86_caches;
    x86_caches = cache;
}

/*
 * This returns a list of cache structures that represent the
 * caches for a CPU.  Some of the structures may have to be
 * "freed" if they are actually shared between CPUs.
 */
static x86_cpu_cache_t *
x86_cache_list(void)
{
    x86_cpu_cache_t *root = NULL;
    x86_cpu_cache_t *cur = NULL;
    x86_cpu_cache_t *last = NULL;
    uint32_t        index;
    uint32_t        cache_info[4];
    uint32_t        nsets;

    do_cpuid(0, cache_info);

    if (cache_info[eax] < 4) {
        /*
         * Processor does not support deterministic
         * cache information.  Don't report anything.
         */
        return NULL;
    }

    for (index = 0; ; index += 1) {
        cache_info[eax] = 4;
        cache_info[ecx] = index;
        cache_info[ebx] = 0;
        cache_info[edx] = 0;

        cpuid(cache_info);

        /*
         * See if all levels have been queried.
         */
        if (bitfield(cache_info[eax], 4, 0) == 0)
            break;

        cur = x86_cache_alloc();
        if (cur == NULL) {
            break;
        }

        cur->type = bitfield(cache_info[eax], 4, 0);
        cur->level = bitfield(cache_info[eax], 7, 5);
        cur->nlcpus = bitfield(cache_info[eax], 25, 14) + 1;
        cur->line_size = bitfield(cache_info[ebx], 11, 0) + 1;
        cur->partitions = bitfield(cache_info[ebx], 21, 12) + 1;
        cur->ways = bitfield(cache_info[ebx], 31, 22) + 1;
        nsets = bitfield(cache_info[ecx], 31, 0) + 1;
        cur->cache_size = cur->line_size * cur->ways * cur->partitions * nsets;

        if (last == NULL) {
            root = cur;
            last = cur;
        } else {
            last->next = cur;
            last = cur;
        }

        num_Lx_caches[cur->level - 1] += 1;
    }

    return(root);
}

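/*
 * Report whether this processor has Hyper-Threading enabled, i.e. whether
 * CPUID advertises more logical processors than cores per package.
 */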
static boolean_t
cpu_is_hyperthreaded(void)
{
    if (cpuid_features() & CPUID_FEATURE_HTT)
        return (cpuid_info()->cpuid_logical_per_package /
                cpuid_info()->cpuid_cores_per_package) > 1;
    else
        return FALSE;
}

static void
x86_lcpu_init(int cpu)
{
    cpu_data_t *cpup;
    x86_lcpu_t *lcpu;
    int        i;

    cpup = cpu_datap(cpu);

    lcpu = &cpup->lcpu;
    lcpu->lcpu = lcpu;
    lcpu->cpu = cpup;
    lcpu->next = NULL;
    lcpu->core = NULL;
    lcpu->lnum = cpu;
    lcpu->pnum = cpup->cpu_phys_number;
    lcpu->halted = FALSE;       /* XXX is this correct? */
    lcpu->idle = FALSE;         /* XXX is this correct? */
    for (i = 0; i < MAX_CACHE_DEPTH; i += 1)
        lcpu->caches[i] = NULL;

    lcpu->master = (lcpu->pnum == (unsigned int) master_cpu);
    lcpu->primary = (lcpu->pnum % cpuid_info()->cpuid_logical_per_package) == 0;
}

static x86_core_t *
x86_core_alloc(int cpu)
{
    x86_core_t *core;
    cpu_data_t *cpup;
    uint32_t   cpu_in_pkg;
    uint32_t   lcpus_per_core;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_cores != NULL) {
        core = free_cores;
        free_cores = core->next;
        core->next = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        core = kalloc(sizeof(x86_core_t));
        if (core == NULL)
            panic("x86_core_alloc() kalloc of x86_core_t failed!\n");
    }

    bzero((void *) core, sizeof(x86_core_t));

    cpu_in_pkg = cpu % cpuid_info()->cpuid_logical_per_package;
    lcpus_per_core = cpuid_info()->cpuid_logical_per_package /
                     cpuid_info()->cpuid_cores_per_package;

    core->pcore_num = cpup->cpu_phys_number / lcpus_per_core;
    core->lcore_num = core->pcore_num % cpuid_info()->cpuid_cores_per_package;

    core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY;

    return(core);
}

static void
x86_core_free(x86_core_t *core)
{
    simple_lock(&x86_topo_lock);
    core->next = free_cores;
    free_cores = core;
    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_find(int cpu)
{
    x86_pkg_t  *pkg;
    cpu_data_t *cpup;
    uint32_t   pkg_num;

    cpup = cpu_datap(cpu);

    pkg_num = cpup->cpu_phys_number / cpuid_info()->cpuid_logical_per_package;

    pkg = x86_pkgs;
    while (pkg != NULL) {
        if (pkg->ppkg_num == pkg_num)
            break;
        pkg = pkg->next;
    }

    return(pkg);
}

static x86_core_t *
x86_core_find(int cpu)
{
    x86_core_t *core;
    x86_pkg_t  *pkg;
    cpu_data_t *cpup;
    uint32_t   core_num;

    cpup = cpu_datap(cpu);

    core_num = cpup->cpu_phys_number
               / (cpuid_info()->cpuid_logical_per_package
                  / cpuid_info()->cpuid_cores_per_package);

    pkg = x86_package_find(cpu);
    if (pkg == NULL)
        return(NULL);

    core = pkg->cores;
    while (core != NULL) {
        if (core->pcore_num == core_num)
            break;
        core = core->next;
    }

    return(core);
}

static void
x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu)
{
    x86_cpu_cache_t *list;
    x86_cpu_cache_t *cur;
    x86_core_t      *cur_core;
    x86_lcpu_t      *cur_lcpu;
    boolean_t       found;
    int             level;
    int             i;
    uint32_t        cpu_mask;

    assert(core != NULL);
    assert(lcpu != NULL);

    /*
     * Add the cache data to the topology.
     */
    list = x86_cache_list();

    simple_lock(&x86_topo_lock);

    while (list != NULL) {
        /*
         * Remove the cache from the front of the list.
         */
        cur = list;
        list = cur->next;
        cur->next = NULL;
        level = cur->level - 1;

        /*
         * If the cache isn't shared then just put it where it
         * belongs.
         */
        if (cur->nlcpus == 1) {
            goto found_first;
        }

        /*
         * We'll assume that all of the caches at a particular level
         * have the same sharing.  So if we have a cache already at
         * this level, we'll just skip looking for the match.
         */
        if (lcpu->caches[level] != NULL) {
            x86_cache_free(cur);
            continue;
        }

        /*
         * This is a shared cache, so we have to figure out if
         * this is the first time we've seen this cache.  We do
         * this by searching through the package and seeing if
         * a related core is already describing this cache.
         *
         * NOTE: This assumes that CPUs whose IDs are equal
         * mod <# sharing cache> are indeed sharing the cache.
         */
        cpu_mask = lcpu->pnum & ~(cur->nlcpus - 1);
        cur_core = core->package->cores;
        found = FALSE;

        while (cur_core != NULL && !found) {
            cur_lcpu = cur_core->lcpus;
            while (cur_lcpu != NULL && !found) {
                if ((cur_lcpu->pnum & ~(cur->nlcpus - 1)) == cpu_mask) {
                    lcpu->caches[level] = cur_lcpu->caches[level];
                    found = TRUE;
                    x86_cache_free(cur);

                    /*
                     * Put the new CPU into the list of the cache.
                     */
                    cur = lcpu->caches[level];
                    for (i = 0; i < cur->nlcpus; i += 1) {
                        if (cur->cpus[i] == NULL) {
                            cur->cpus[i] = lcpu;
                            break;
                        }
                    }
                }
                cur_lcpu = cur_lcpu->next;
            }

            cur_core = cur_core->next;
        }

        if (!found) {
found_first:
            cur->next = lcpu->caches[level];
            lcpu->caches[level] = cur;
            cur->cpus[0] = lcpu;
        }
    }

    /*
     * Add the logical CPU to the core.
     */
    lcpu->next = core->lcpus;
    lcpu->core = core;
    core->lcpus = lcpu;
    core->num_lcpus += 1;

    simple_unlock(&x86_topo_lock);
}

static x86_pkg_t *
x86_package_alloc(int cpu)
{
    x86_pkg_t  *pkg;
    cpu_data_t *cpup;

    cpup = cpu_datap(cpu);

    simple_lock(&x86_topo_lock);
    if (free_pkgs != NULL) {
        pkg = free_pkgs;
        free_pkgs = pkg->next;
        pkg->next = NULL;
        simple_unlock(&x86_topo_lock);
    } else {
        simple_unlock(&x86_topo_lock);
        pkg = kalloc(sizeof(x86_pkg_t));
        if (pkg == NULL)
            panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n");
    }

    bzero((void *) pkg, sizeof(x86_pkg_t));

    pkg->ppkg_num = cpup->cpu_phys_number
                    / cpuid_info()->cpuid_logical_per_package;

    pkg->lpkg_num = num_packages;
    atomic_incl((long *) &num_packages, 1);

    pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY;
    return(pkg);
}

static void
x86_package_free(x86_pkg_t *pkg)
{
    simple_lock(&x86_topo_lock);
    pkg->next = free_pkgs;
    free_pkgs = pkg;
    atomic_decl((long *) &num_packages, 1);
    simple_unlock(&x86_topo_lock);
}

static void
x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core)
{
    assert(pkg != NULL);
    assert(core != NULL);

    core->next = pkg->cores;
    core->package = pkg;
    pkg->cores = core;
    pkg->num_cores += 1;
}

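/*
 * Build the topology nodes for a CPU being brought into the system: create
 * its package and core structures if they do not yet exist, and bump the
 * machine_info maxima.  Returns the core the logical CPU was added to.
 */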
void *
cpu_thread_alloc(int cpu)
{
    x86_core_t *core;
    x86_pkg_t  *pkg;
    cpu_data_t *cpup;
    uint32_t   phys_cpu;

    cpup = cpu_datap(cpu);

    phys_cpu = cpup->cpu_phys_number;

    x86_lcpu_init(cpu);

    /*
     * Assume that all cpus have the same features.
     */
    if (cpu_is_hyperthreaded()) {
        cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
    } else {
        cpup->cpu_threadtype = CPU_THREADTYPE_NONE;
    }

    /*
     * Only allow one to manipulate the topology at a time.
     */
    simple_lock(&x86_topo_lock);

    /*
     * Get the core for this logical CPU.
     */
core_again:
    core = x86_core_find(cpu);
    if (core == NULL) {
        /*
         * Core structure hasn't been created yet, do it now.
         *
         * Get the package that the core is part of.
         */
package_again:
        pkg = x86_package_find(cpu);
        if (pkg == NULL) {
            /*
             * Package structure hasn't been created yet, do it now.
             */
            simple_unlock(&x86_topo_lock);
            pkg = x86_package_alloc(cpu);
            simple_lock(&x86_topo_lock);
            if (x86_package_find(cpu) != NULL) {
                x86_package_free(pkg);
                goto package_again;
            }

            /*
             * Add the new package to the global list of packages.
             */
            pkg->next = x86_pkgs;
            x86_pkgs = pkg;
        }

        /*
         * Allocate the core structure now.
         */
        simple_unlock(&x86_topo_lock);
        core = x86_core_alloc(cpu);
        simple_lock(&x86_topo_lock);
        if (x86_core_find(cpu) != NULL) {
            x86_core_free(core);
            goto core_again;
        }

        /*
         * Add it to the package.
         */
        x86_package_add_core(pkg, core);
        machine_info.physical_cpu_max += 1;

        /*
         * Allocate performance counter structure.
         */
        simple_unlock(&x86_topo_lock);
        core->pmc = pmc_alloc();
        simple_lock(&x86_topo_lock);
    }

    /*
     * Done manipulating the topology, so others can get in.
     */
    machine_info.logical_cpu_max += 1;
    simple_unlock(&x86_topo_lock);

    x86_core_add_lcpu(core, &cpup->lcpu);

    return (void *) core;
}

void
cpu_thread_init(void)
{
    int         my_cpu = get_cpu_number();
    cpu_data_t  *cpup = current_cpu_datap();
    x86_core_t  *core;
    static int  initialized = 0;

    /*
     * If we're the boot processor, we do all of the initialization of
     * the CPU topology infrastructure.
     */
    if (my_cpu == master_cpu && !initialized) {
        simple_lock_init(&x86_topo_lock, 0);

        /*
         * Put this logical CPU into the physical CPU topology.
         */
        cpup->lcpu.core = cpu_thread_alloc(my_cpu);

        initialized = 1;
    }

    /*
     * Do the CPU accounting.
     */
    core = cpup->lcpu.core;
    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu += 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu += 1;
    core->active_lcpus += 1;
    cpup->lcpu.halted = FALSE;
    cpup->lcpu.idle = FALSE;
    simple_unlock(&x86_topo_lock);

    pmCPUMarkRunning(cpup);
    etimer_resync_deadlines();
}

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
    x86_core_t *core;
    cpu_data_t *cpup = current_cpu_datap();

    simple_lock(&x86_topo_lock);
    machine_info.logical_cpu -= 1;
    cpup->lcpu.idle = TRUE;
    core = cpup->lcpu.core;
    core->active_lcpus -= 1;
    if (core->active_lcpus == 0)
        machine_info.physical_cpu -= 1;
    simple_unlock(&x86_topo_lock);

    /*
     * Let the power management code determine the best way to "stop"
     * the processor.
     */
    ml_set_interrupts_enabled(FALSE);
    while (1) {
        pmCPUHalt(PM_HALT_NORMAL);
    }
    /* NOT REACHED */
}