[apple/xnu.git] / osfmk / i386 / cpu_threads.c (xnu-792)
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <vm/vm_kern.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>

/*
 * Kernel parameter determining whether threads are halted unconditionally
 * in the idle state. Halting in idle is the default behavior.
 * See machine_idle() for use.
 */
int idlehalt = 1;

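/*
 * Per-cpu initialization: record this cpu's thread type and attach it to
 * the cpu_core_t shared by all hardware threads in the same physical core,
 * allocating that structure if this cpu is the first in its core to arrive.
 */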
void
cpu_thread_init(void)
{
        int             my_cpu = get_cpu_number();
        int             my_core_base_cpu;
        int             ret;
        cpu_core_t      *my_core;

        /* Have we initialized already for this cpu? */
        if (cpu_core())
                return;

        if (cpuid_features() & CPUID_FEATURE_HTT) {
                /*
                 * Get the cpu number of the base thread in the core.
                 */
                my_core_base_cpu = cpu_to_core_cpu(my_cpu);
                current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT;
        } else {
                my_core_base_cpu = my_cpu;
                current_cpu_datap()->cpu_threadtype = CPU_THREADTYPE_NONE;
        }

        /*
         * Allocate the base cpu_core struct if none exists.
         * Since we could be racing with other threads in the same core,
         * this needs care without using locks. We allocate a new core
         * structure and assign it atomically, freeing it if we lost the race.
         */
        my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
        if (my_core == NULL) {
                cpu_core_t      *new_core;

                ret = kmem_alloc(kernel_map,
                                 (void *) &new_core, sizeof(cpu_core_t));
                if (ret != KERN_SUCCESS)
                        panic("cpu_thread_init() kmem_alloc ret=%d\n", ret);
                bzero((void *) new_core, sizeof(cpu_core_t));
                new_core->base_cpu = my_core_base_cpu;
                if (atomic_cmpxchg((uint32_t *) &cpu_to_core(my_core_base_cpu),
                                   0, (uint32_t) new_core)) {
                        atomic_incl((long *) &machine_info.physical_cpu, 1);
                        atomic_incl((long *) &machine_info.physical_cpu_max, 1);
                } else {
                        kmem_free(kernel_map,
                                  (vm_offset_t) new_core, sizeof(cpu_core_t));
                }
                my_core = (cpu_core_t *) cpu_to_core(my_core_base_cpu);
        }

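        /* Point this cpu's core map entry at the (possibly shared) core. */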
        cpu_to_core(my_cpu) = (struct cpu_core *) my_core;

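        /*
         * Count this hardware thread, both within its core and in the
         * machine-wide logical cpu totals.
         */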
        atomic_incl((long *) &my_core->active_threads, 1);
        atomic_incl((long *) &my_core->num_threads, 1);
        atomic_incl((long *) &machine_info.logical_cpu, 1);
        atomic_incl((long *) &machine_info.logical_cpu_max, 1);

}
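
/*
 * The allocation race handled in cpu_thread_init() is a common lock-free
 * "publish or discard" pattern: build the structure privately, install it
 * with a single compare-and-swap, and free it if another thread won.
 * A minimal user-space sketch of the same pattern, using C11 atomics in
 * place of atomic_cmpxchg()/kmem_alloc() (illustrative only -- the names
 * core_slot, core_get_or_create and MAX_CORES are invented for the example
 * and are not part of xnu):
 *
 *      #include <stdatomic.h>
 *      #include <stdlib.h>
 *
 *      #define MAX_CORES 32
 *      struct core { int base_cpu; };
 *
 *      static _Atomic(struct core *) core_slot[MAX_CORES];
 *
 *      static struct core *
 *      core_get_or_create(int base_cpu)
 *      {
 *              struct core *cur = atomic_load(&core_slot[base_cpu]);
 *              if (cur != NULL)
 *                      return cur;     // already published by someone else
 *
 *              struct core *fresh = calloc(1, sizeof(*fresh));
 *              if (fresh == NULL)
 *                      return NULL;
 *              fresh->base_cpu = base_cpu;
 *
 *              struct core *expected = NULL;
 *              if (atomic_compare_exchange_strong(&core_slot[base_cpu],
 *                                                 &expected, fresh))
 *                      return fresh;   // we won: our copy is now visible
 *              free(fresh);            // we lost: discard our copy
 *              return expected;        // CAS left the winner's pointer here
 *      }
 *
 * As in cpu_thread_init() above, any one-time side effect (such as bumping
 * the physical cpu counts) belongs on the winner's path only.
 */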

/*
 * Called for a cpu to halt permanently
 * (as opposed to halting and expecting an interrupt to awaken it).
 */
void
cpu_thread_halt(void)
{
        cpu_core_t      *my_core = cpu_core();

        /* Note: don't ever decrement the number of physical processors */
        atomic_decl((long *) &my_core->active_threads, 1);
        atomic_decl((long *) &my_core->num_threads, 1);
        atomic_decl((long *) &machine_info.logical_cpu, 1);

        cpu_halt();
}