/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef I386_CPU_DATA
#define I386_CPU_DATA

#include <mach_assert.h>

#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/processor.h>
#include <kern/pms.h>
#include <pexpert/pexpert.h>
#include <mach/i386/thread_status.h>
#include <mach/i386/vm_param.h>
#include <i386/rtclock_protos.h>
#include <i386/pmCPU.h>
#include <i386/cpu_topology.h>

#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <machine/pal_routines.h>

/*
 * Data structures referenced (anonymously) from per-cpu data:
 */
struct cpu_cons_buffer;
struct cpu_desc_table;
struct mca_state;

/*
 * Data structures embedded in per-cpu data:
 */
typedef struct rtclock_timer {
	mpqueue_head_t		queue;
	uint64_t		deadline;
	uint64_t		when_set;
	boolean_t		has_expired;
} rtclock_timer_t;

#if defined(__i386__)

typedef struct {
	struct i386_tss		*cdi_ktss;
#if MACH_KDB
	struct i386_tss		*cdi_dbtss;
#endif	/* MACH_KDB */
	struct __attribute__((packed)) {
		uint16_t	size;
		struct fake_descriptor	*ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t		cdi_sstk;
} cpu_desc_index_t;

typedef enum {
	TASK_MAP_32BIT,			/* 32-bit, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit, separate address space */
	TASK_MAP_64BIT_SHARED		/* 64-bit, kernel-shared addr space */
} task_map_t;

#elif defined(__x86_64__)

typedef struct {
	struct x86_64_tss	*cdi_ktss;
#if MACH_KDB
	struct x86_64_tss	*cdi_dbtss;
#endif	/* MACH_KDB */
	struct __attribute__((packed)) {
		uint16_t	size;
		void		*ptr;
	} cdi_gdt, cdi_idt;
	struct fake_descriptor	*cdi_ldt;
	vm_offset_t		cdi_sstk;
} cpu_desc_index_t;

typedef enum {
	TASK_MAP_32BIT,			/* 32-bit user, compatibility mode */
	TASK_MAP_64BIT,			/* 64-bit user thread, shared space */
} task_map_t;

#else
#error Unsupported architecture
#endif

/*
 * This structure is used on entry into the (uber-)kernel on syscall from
 * a 64-bit user. It contains the address of the machine state save area
 * for the current thread and a temporary place to save the user's rsp
 * before loading this address into rsp.
 */
typedef struct {
	addr64_t	cu_isf;		/* thread->pcb->iss.isf */
	uint64_t	cu_tmp;		/* temporary scratch */
	addr64_t	cu_user_gs_base;
} cpu_uber_t;

typedef uint16_t	pcid_t;
typedef uint8_t		pcid_ref_t;
/*
 * Per-cpu data.
 *
 * Each processor has a per-cpu data area which is dereferenced through the
 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inlines provide single-instruction access to frequently used
 * members such as get_cpu_number()/cpu_number() and get_active_thread()/
 * current_thread().
 *
 * Cpu data owned by another processor can be accessed using the
 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
 * pointers.
 */
typedef struct cpu_data
{
	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
#define				cpu_pd cpu_pal_data	/* convenience alias */
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;
	int			cpu_preemption_level;
	int			cpu_number;		/* Logical CPU */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;
	int			cpu_interrupt_level;
	int			cpu_phys_number;	/* Physical CPU */
	cpu_id_t		cpu_id;			/* Platform Expert */
	int			cpu_signals;		/* IPI events */
	int			cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	int			cpu_mcount_off;		/* mcount recursion */
	ast_t			cpu_pending_ast;
	int			cpu_type;
	int			cpu_subtype;
	int			cpu_threadtype;
	int			cpu_running;
	rtclock_timer_t		rtclock_timer;
	boolean_t		cpu_is64bit;
	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
	union {
		volatile uint32_t cpu_tlb_invalid;
		struct {
			volatile uint16_t cpu_tlb_invalid_local;
			volatile uint16_t cpu_tlb_invalid_global;
		};
	};
	volatile task_map_t	cpu_task_map;
	volatile addr64_t	cpu_task_cr3;
	addr64_t		cpu_kernel_cr3;
	cpu_uber_t		cpu_uber;
	void			*cpu_chud;
	void			*cpu_console_buf;
	struct x86_lcpu		lcpu;
	struct processor	*cpu_processor;
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;
#endif
	struct cpu_desc_table	*cpu_desc_tablep;
	struct fake_descriptor	*cpu_ldtp;
	cpu_desc_index_t	cpu_desc_index;
	int			cpu_ldt;
#ifdef MACH_KDB
	/* XXX Untested: */
	int			cpu_db_pass_thru;
	vm_offset_t		cpu_db_stacks;
	void			*cpu_kdb_saved_state;
	spl_t			cpu_kdb_saved_ipl;
	int			cpu_kdb_is_slave;
	int			cpu_kdb_active;
#endif /* MACH_KDB */
	boolean_t		cpu_iflag;
	boolean_t		cpu_boot_complete;
	int			cpu_hibernate;
#if NCOPY_WINDOWS > 0
	vm_offset_t		cpu_copywindow_base;
	uint64_t		*cpu_copywindow_pdp;

	vm_offset_t		cpu_physwindow_base;
	uint64_t		*cpu_physwindow_ptep;
#endif
	void			*cpu_hi_iss;

#define HWINTCNT_SIZE 256
	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
	uint64_t		cpu_dr7;		/* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
							 */
	uint64_t		cpu_uber_arg_store_valid; /* Double mapped
							   * address of pcb
							   * arg store
							   * validity flag.
							   */
	pal_rtc_nanotime_t	*cpu_nanotime;		/* Nanotime info */
	thread_t		csw_old_thread;
	thread_t		csw_new_thread;
#if defined(__x86_64__)
	uint32_t		cpu_pmap_pcid_enabled;
	pcid_t			cpu_active_pcid;
	pcid_t			cpu_last_pcid;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
#define	PMAP_PCID_MAX_PCID	(0x1000)
	pcid_t			cpu_pcid_free_hint;
	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
#ifdef PCID_STATS
	uint64_t		cpu_pmap_pcid_flushes;
	uint64_t		cpu_pmap_pcid_preserves;
#endif
#endif /* x86_64 */
	uint64_t		cpu_max_observed_int_latency;
	int			cpu_max_observed_int_latency_vector;
	uint64_t		debugger_entry_time;
	volatile boolean_t	cpu_NMI_acknowledged;
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack.
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;
	uint32_t		cpu_nested_istack_events;
	x86_saved_state64_t	*cpu_fatal_trap_state;
	x86_saved_state64_t	*cpu_post_fatal_trap_state;
} cpu_data_t;
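
/*
 * Illustrative usage (an editorial sketch, not part of the original header):
 * the local CPU's copy of this structure is reached via current_cpu_datap(),
 * and another processor's copy via cpu_datap(cpu); both accessors are defined
 * below.  For example:
 *
 *	int	my_cpu  = current_cpu_datap()->cpu_number;
 *	int	running = cpu_datap(1)->cpu_running;	(CPU 1, if populated)
 */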

extern cpu_data_t	*cpu_data_ptr[];
extern cpu_data_t	cpu_data_master;

/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;

#define CPU_DATA_GET_INDEX(member,index,type)				\
	type ret;							\
	__asm__ volatile ("mov %%gs:(%1),%0"				\
		: "=r" (ret)						\
		: "r" (offsetof(cpu_data_t,member[index])));		\
	return ret;

#define CPU_DATA_SET(member,value)					\
	__asm__ volatile ("mov %0,%%gs:%P1"				\
		:							\
		: "r" (value), "i" (offsetof(cpu_data_t,member)));
#define CPU_DATA_XCHG(member,value,type)				\
	type ret;							\
	__asm__ volatile ("xchg %0,%%gs:%P1"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)), "0" (value));	\
	return ret;

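/*
 * Illustrative sketch (not part of the original header): each macro above is
 * intended to serve as the entire body of a small accessor, so that the access
 * compiles down to a single %gs-relative instruction.  A hypothetical setter
 * built from CPU_DATA_SET would look like:
 *
 *	static inline void
 *	set_pending_ast(ast_t ast)
 *	{
 *		CPU_DATA_SET(cpu_pending_ast, ast)
 *	}
 *
 * The getters defined below follow the same pattern using CPU_DATA_GET.
 */
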
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()		get_active_thread()
#define current_thread()		current_thread_fast()
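
/*
 * Note (editorial, not part of the original header): because cpu_active_thread
 * lives in the %gs-based per-cpu area, current_thread() here reduces to a
 * single load, roughly
 *
 *	mov	%gs:offsetof(cpu_data_t, cpu_active_thread), <reg>
 *
 * whereas code outside osfmk must call the exported current_thread() routine,
 * as the comment above explains.
 */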

static inline boolean_t
get_is64bit(void)
{
	CPU_DATA_GET(cpu_is64bit, boolean_t)
}
#if CONFIG_YONAH
#define cpu_mode_is64bit()		get_is64bit()
#else
#define cpu_mode_is64bit()		TRUE
#endif

static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}

static inline void
disable_preemption(void)
{
	__asm__ volatile ("incl %%gs:%P0"
			:
			: "i" (offsetof(cpu_data_t, cpu_preemption_level)));
}

static inline void
enable_preemption(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0		\n\t"
			  "jne 1f			\n\t"
			  "call _kernel_preempt_check	\n\t"
			  "1:"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "eax", "ecx", "edx", "cc", "memory");
}

static inline void
enable_preemption_no_check(void)
{
	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:%P0"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "cc", "memory");
}
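
/*
 * Illustrative usage (an editorial sketch, not part of the original header):
 * the expected pattern is a balanced pair bracketing a short critical section
 * that must not migrate between CPUs, e.g.
 *
 *	disable_preemption();
 *	current_cpu_datap()->cpu_hwIntCnt[vector]++;	(hypothetical example)
 *	enable_preemption();
 *
 * enable_preemption() calls kernel_preempt_check() when the level returns to
 * zero, so any preemption deferred while the level was raised can be taken;
 * enable_preemption_no_check() skips that check.
 */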

static inline void
mp_disable_preemption(void)
{
	disable_preemption();
}

static inline void
mp_enable_preemption(void)
{
	enable_preemption();
}

static inline void
mp_enable_preemption_no_check(void)
{
	enable_preemption_no_check();
}

static inline cpu_data_t *
current_cpu_datap(void)
{
	CPU_DATA_GET(cpu_this, cpu_data_t *);
}

static inline cpu_data_t *
cpu_datap(int cpu)
{
	return cpu_data_ptr[cpu];
}
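
/*
 * Illustrative sketch (editorial, not part of the original header; assumes the
 * processor count is available, e.g. via the real_ncpus global declared
 * elsewhere in osfmk): walking every populated per-cpu area might look like
 *
 *	for (int i = 0; i < (int) real_ncpus; i++) {
 *		cpu_data_t *cdp = cpu_datap(i);
 *		if (cdp == NULL || !cdp->cpu_running)
 *			continue;
 *		(inspect cdp->cpu_signals, cdp->cpu_hwIntCnt[], ...)
 *	}
 */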

extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);

#endif	/* I386_CPU_DATA */