]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
c910b4d9 | 2 | * Copyright (c) 2000-2008 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | * | |
31 | */ | |
32 | ||
33 | #ifndef I386_CPU_DATA | |
34 | #define I386_CPU_DATA | |
35 | ||
1c79356b A |
36 | #include <mach_assert.h> |
37 | ||
1c79356b A |
38 | #include <kern/assert.h> |
39 | #include <kern/kern_types.h> | |
b0d623f7 | 40 | #include <kern/queue.h> |
91447636 | 41 | #include <kern/processor.h> |
0c530ab8 | 42 | #include <kern/pms.h> |
55e303ae | 43 | #include <pexpert/pexpert.h> |
0c530ab8 | 44 | #include <mach/i386/thread_status.h> |
b0d623f7 | 45 | #include <mach/i386/vm_param.h> |
6d2010ae | 46 | #include <i386/rtclock_protos.h> |
0c530ab8 | 47 | #include <i386/pmCPU.h> |
2d21ac55 A |
48 | #include <i386/cpu_topology.h> |
49 | ||
b0d623f7 | 50 | #if CONFIG_VMX |
2d21ac55 | 51 | #include <i386/vmx/vmx_cpu.h> |
b0d623f7 | 52 | #endif |
91447636 | 53 | |
6d2010ae A |
54 | #include <machine/pal_routines.h> |
55 | ||
91447636 A |
56 | /* |
57 | * Data structures referenced (anonymously) from per-cpu data: | |
58 | */ | |
91447636 | 59 | struct cpu_cons_buffer; |
0c530ab8 | 60 | struct cpu_desc_table; |
2d21ac55 | 61 | struct mca_state; |
91447636 | 62 | |
91447636 A |
63 | /* |
64 | * Data structures embedded in per-cpu data: | |
65 | */ | |
/*
 * rtclock_timer: per-cpu real-time clock timer state
 * (embedded in cpu_data below).
 */
typedef struct rtclock_timer {
	mpqueue_head_t		queue;		/* MP-safe queue of pending timer entries */
	uint64_t		deadline;	/* next deadline to program into the timer */
	uint64_t		when_set;	/* time at which 'deadline' was last set */
	boolean_t		has_expired;	/* deadline has fired and awaits processing */
} rtclock_timer_t;
72 | ||
2d21ac55 | 73 | |
b0d623f7 A |
#if defined(__i386__)

/*
 * cpu_desc_index_t: per-cpu pointers into this cpu's descriptor tables.
 * The packed {16-bit size, pointer} pairs match the lgdt/lidt memory
 * operand layout (pseudo-descriptor: limit + base).
 */
typedef struct {
	struct i386_tss		*cdi_ktss;	/* kernel TSS */
	struct __attribute__((packed)) {
		uint16_t		size;	/* table limit */
		struct fake_descriptor	*ptr;	/* table base */
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;	/* LDT base */
	vm_offset_t		cdi_sstk;	/* NOTE(review): presumably the sysenter
						 * stack, inferred from the name — confirm */
} cpu_desc_index_t;

/* Address-space layout mode of the task currently on this cpu. */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit, separate address space */
	TASK_MAP_64BIT_SHARED		/* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)

/*
 * x86_64 variant of cpu_desc_index_t; same shape as the i386 one but
 * with a 64-bit TSS and an untyped descriptor-table base pointer.
 */
typedef struct {
	struct x86_64_tss	*cdi_ktss;	/* kernel TSS */
	struct __attribute__((packed)) {
		uint16_t	size;		/* table limit */
		void		*ptr;		/* table base */
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;	/* LDT base */
	vm_offset_t		cdi_sstk;	/* NOTE(review): presumably the sysenter
						 * stack, inferred from the name — confirm */
} cpu_desc_index_t;

/* On x86_64 there is no separate 64-bit address-space variant. */
typedef enum {
	TASK_MAP_32BIT,			/* 32-bit user, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit user thread, shared space */
} task_map_t;

#else
#error Unsupported architecture
#endif
113 | ||
/*
 * This structure is used on entry into the (uber-)kernel on syscall from
 * a 64-bit user. It contains the address of the machine state save area
 * for the current thread and a temporary place to save the user's rsp
 * before loading this address into rsp.
 */
typedef struct {
	addr64_t	cu_isf;			/* thread->pcb->iss.isf */
	uint64_t	cu_tmp;			/* temporary scratch */
	addr64_t	cu_user_gs_base;	/* NOTE(review): presumably the user GS
						 * base restored on return to user mode —
						 * inferred from the name, confirm */
} cpu_uber_t;
91447636 | 125 | |
/*
 * Process-context identifier (PCID) types; used by the x86_64
 * cpu_pmap_pcid_* fields in cpu_data below.
 */
typedef	uint16_t	pcid_t;		/* PCID value */
typedef	uint8_t		pcid_ref_t;	/* per-PCID reference count */
/*
 * Per-cpu data.
 *
 * Each processor has a per-cpu data area which is dereferenced through the
 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inlines provides single-instruction access to frequently used
 * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
 * current_thread().
 *
 * Cpu data owned by another processor can be accessed using the
 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
 * pointers.
 *
 * NOTE: member offsets are baked into the %gs-relative accessor macros
 * below (and possibly into assembly elsewhere), so the layout of this
 * structure must not be reordered casually.
 */
typedef struct cpu_data
{
	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
#define				cpu_pd cpu_pal_data	/* convenience alias */
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;	/* thread running on this cpu */
	int			cpu_preemption_level;	/* preemption-disable nesting count */
	int			cpu_number;		/* Logical CPU */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;	/* interrupt stack top */
	int			cpu_interrupt_level;	/* interrupt nesting count */
	int			cpu_phys_number;	/* Physical CPU */
	cpu_id_t		cpu_id;			/* Platform Expert */
	int			cpu_signals;		/* IPI events */
	int			cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	int			cpu_mcount_off;		/* mcount recursion */
	ast_t			cpu_pending_ast;	/* pending asynchronous system traps */
	int			cpu_type;
	int			cpu_subtype;
	int			cpu_threadtype;
	int			cpu_running;		/* cpu is up and scheduling */
	rtclock_timer_t		rtclock_timer;		/* per-cpu timer state (above) */
	boolean_t		cpu_is64bit;		/* cpu is 64-bit capable */
	/* 64-byte aligned: hot TLB/cr3 state. */
	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
	union {
		/* Whole-word form allows testing/clearing both flags at once. */
		volatile uint32_t cpu_tlb_invalid;
		struct {
			volatile uint16_t cpu_tlb_invalid_local;
			volatile uint16_t cpu_tlb_invalid_global;
		};
	};
	volatile task_map_t	cpu_task_map;		/* address-space mode of current task */
	volatile addr64_t	cpu_task_cr3;		/* user pmap's cr3 */
	addr64_t		cpu_kernel_cr3;		/* kernel pmap's cr3 */
	cpu_uber_t		cpu_uber;		/* 64-bit syscall entry state (above) */
	void			*cpu_chud;
	void			*cpu_console_buf;
	struct x86_lcpu		lcpu;			/* logical-cpu topology node */
	struct processor	*cpu_processor;		/* corresponding scheduler processor */
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;
#endif
	struct cpu_desc_table	*cpu_desc_tablep;	/* this cpu's descriptor tables */
	struct fake_descriptor	*cpu_ldtp;
	cpu_desc_index_t	cpu_desc_index;		/* pointers into the tables (above) */
	int			cpu_ldt;
	boolean_t		cpu_iflag;		/* saved interrupt-enable flag */
	boolean_t		cpu_boot_complete;
	int			cpu_hibernate;
#if NCOPY_WINDOWS > 0
	/* Per-cpu copy/phys mapping windows (32-bit copyio support). */
	vm_offset_t		cpu_copywindow_base;
	uint64_t		*cpu_copywindow_pdp;

	vm_offset_t		cpu_physwindow_base;
	uint64_t		*cpu_physwindow_ptep;
#endif
	void			*cpu_hi_iss;

#define HWINTCNT_SIZE 256
	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
	uint64_t		cpu_dr7;		/* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
							 */
	uint64_t		cpu_uber_arg_store_valid; /* Double mapped
							   * address of pcb
							   * arg store
							   * validity flag.
							   */
	pal_rtc_nanotime_t	*cpu_nanotime;		/* Nanotime info */
#if CONFIG_COUNTERS
	/* Context-switch bookkeeping for the counters facility. */
	thread_t		csw_old_thread;
	thread_t		csw_new_thread;
#endif /* CONFIG COUNTERS */
#if defined(__x86_64__)
	/* PCID (process-context identifier) TLB-tagging state for the pmap. */
	uint32_t		cpu_pmap_pcid_enabled;
	pcid_t			cpu_active_pcid;
	pcid_t			cpu_last_pcid;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
#define	PMAP_PCID_MAX_PCID	(0x1000)
	pcid_t			cpu_pcid_free_hint;
	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
#ifdef PCID_STATS
	uint64_t		cpu_pmap_pcid_flushes;
	uint64_t		cpu_pmap_pcid_preserves;
#endif
#endif /* x86_64 */
	/* Interrupt-latency diagnostics. */
	uint64_t		cpu_max_observed_int_latency;
	int			cpu_max_observed_int_latency_vector;
	uint64_t		debugger_entry_time;
	volatile boolean_t	cpu_NMI_acknowledged;
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;
	uint32_t		cpu_nested_istack_events;
	/* Saved machine state captured on a fatal trap (debugger support). */
	x86_saved_state64_t	*cpu_fatal_trap_state;
	x86_saved_state64_t	*cpu_post_fatal_trap_state;
} cpu_data_t;
1c79356b | 256 | |
91447636 A |
257 | extern cpu_data_t *cpu_data_ptr[]; |
258 | extern cpu_data_t cpu_data_master; | |
9bccf70c | 259 | |
/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
/* Fallback when <stddef.h> has not provided offsetof. */
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
/*
 * CPU_DATA_GET: expands to a declaration, a single %gs-relative load of
 * the named cpu_data member, and a 'return' — i.e. the entire body of an
 * accessor function.  The member offset is an "i" (immediate) operand,
 * so the access compiles to one mov instruction.
 */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;

/*
 * CPU_DATA_GET_INDEX: as above but for an element of a member array;
 * the offset is passed in a register ("r") since 'index' need not be
 * a compile-time constant.
 */
#define CPU_DATA_GET_INDEX(member,index,type)				\
	type ret;							\
	__asm__ volatile ("mov %%gs:(%1),%0"				\
		: "=r" (ret)						\
		: "r" (offsetof(cpu_data_t,member[index])));		\
	return ret;

/* CPU_DATA_SET: single %gs-relative store into the named member. */
#define CPU_DATA_SET(member,value)					\
	__asm__ volatile ("mov %0,%%gs:%P1"				\
		:							\
		: "r" (value), "i" (offsetof(cpu_data_t,member)));
/* CPU_DATA_XCHG: atomically exchange 'value' with the named member. */
#define CPU_DATA_XCHG(member,value,type)				\
	type ret;							\
	__asm__ volatile ("xchg %0,%%gs:%P1"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)), "0" (value));	\
	return ret;
288 | ||
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside, must call
 * the real thing.
 */
static inline thread_t
get_active_thread(void)
{
	/* Single %gs-relative load of cpu_data.cpu_active_thread. */
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()		get_active_thread()
#define current_thread()		current_thread_fast()
1c79356b | 301 | |
/* Read cpu_data.cpu_is64bit for the current cpu (set during cpu init). */
static inline boolean_t
get_is64bit(void)
{
	CPU_DATA_GET(cpu_is64bit, boolean_t)
}
#if CONFIG_YONAH
/* A 32-bit-only cpu may be present: must test capability at run time. */
#define cpu_mode_is64bit()		get_is64bit()
#else
/* All supported cpus are 64-bit capable: constant-folds to TRUE. */
#define cpu_mode_is64bit()		TRUE
#endif
0c530ab8 | 312 | |
/* Current cpu's preemption-disable nesting level (0 = preemptible). */
static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
/* Current cpu's interrupt nesting level. */
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
/* Logical cpu number of the caller's processor. */
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/* Physical cpu number (as assigned by the platform) of this processor. */
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
1c79356b | 333 | |
b0d623f7 | 334 | |
/*
 * Disable preemption on the current cpu: a single %gs-relative incl
 * of cpu_data.cpu_preemption_level.  Nestable; paired with
 * enable_preemption().
 */
static inline void
disable_preemption(void)
{
	__asm__ volatile ("incl %%gs:%P0"
			:
			: "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}
1c79356b | 342 | |
/*
 * Re-enable preemption: decrement cpu_data.cpu_preemption_level and,
 * if it reached zero (decl sets ZF; jne skips otherwise), call
 * kernel_preempt_check() to take any pending preemption.  The call
 * is why eax/ecx/edx are in the clobber list.  Must balance a prior
 * disable_preemption() (asserted below).
 */
static inline void
enable_preemption(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0 \n\t"
			  "jne 1f \n\t"
			  "call _kernel_preempt_check \n\t"
			  "1:"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "eax", "ecx", "edx", "cc", "memory");
}
356 | ||
/*
 * As enable_preemption(), but only decrements the level — does NOT
 * call kernel_preempt_check() when it reaches zero, so a pending
 * preemption is left for a later check.
 */
static inline void
enable_preemption_no_check(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "cc", "memory");
}
367 | ||
/* MP-prefixed alias: identical to disable_preemption() on this arch. */
static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}
373 | ||
/* MP-prefixed alias: identical to enable_preemption() on this arch. */
static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}
379 | ||
/* MP-prefixed alias: identical to enable_preemption_no_check(). */
static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}
385 | ||
/*
 * Per-cpu data area of the caller's cpu; cpu_data.cpu_this points
 * back at the structure itself, so this is one %gs-relative load.
 */
static inline cpu_data_t *
current_cpu_datap(void)
{
	CPU_DATA_GET(cpu_this, cpu_data_t *);
}
391 | ||
392 | static inline cpu_data_t * | |
393 | cpu_datap(int cpu) | |
394 | { | |
91447636 A |
395 | return cpu_data_ptr[cpu]; |
396 | } | |
397 | ||
398 | extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu); | |
316670eb | 399 | extern void cpu_data_realloc(void); |
1c79356b | 400 | |
1c79356b | 401 | #endif /* I386_CPU_DATA */ |