/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef	I386_CPU_DATA
#define I386_CPU_DATA

#include <mach_assert.h>

#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/processor.h>
#include <kern/pms.h>
#include <pexpert/pexpert.h>
#include <mach/i386/thread_status.h>
#include <mach/i386/vm_param.h>
#include <i386/locks.h>
#include <i386/rtclock_protos.h>
#include <i386/pmCPU.h>
#include <i386/cpu_topology.h>
#include <i386/seg.h>

#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#if MONOTONIC
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <machine/pal_routines.h>

/*
 * Data structures referenced (anonymously) from per-cpu data:
 */
struct cpu_cons_buffer;
struct cpu_desc_table;
struct mca_state;
struct prngContext;

/*
 * Data structures embedded in per-cpu data:
 */
typedef struct rtclock_timer {
	mpqueue_head_t		queue;
	uint64_t		deadline;
	uint64_t		when_set;
	boolean_t		has_expired;
} rtclock_timer_t;

typedef struct {
	/* The 'u' suffixed fields store the double-mapped descriptor addresses */
	struct x86_64_tss	*cdi_ktssu;
	struct x86_64_tss	*cdi_ktssb;
	x86_64_desc_register_t	cdi_gdtu;
	x86_64_desc_register_t	cdi_gdtb;
	x86_64_desc_register_t	cdi_idtu;
	x86_64_desc_register_t	cdi_idtb;
	struct fake_descriptor	*cdi_ldtu;
	struct fake_descriptor	*cdi_ldtb;
	vm_offset_t		cdi_sstku;
	vm_offset_t		cdi_sstkb;
} cpu_desc_index_t;

typedef enum {
	TASK_MAP_32BIT,		/* 32-bit user, compatibility mode */
	TASK_MAP_64BIT,		/* 64-bit user thread, shared space */
} task_map_t;


/*
 * This structure is used on entry into the (uber-)kernel on syscall from
 * a 64-bit user. It contains the address of the machine state save area
 * for the current thread and a temporary place to save the user's rsp
 * before loading this address into rsp.
 */
typedef struct {
	addr64_t	cu_isf;		/* thread->pcb->iss.isf */
	uint64_t	cu_tmp;		/* temporary scratch */
	addr64_t	cu_user_gs_base;
} cpu_uber_t;

typedef	uint16_t	pcid_t;
typedef	uint8_t		pcid_ref_t;

#define CPU_RTIME_BINS (12)
#define CPU_ITIME_BINS (CPU_RTIME_BINS)

#define MAXPLFRAMES (16)
typedef struct {
	boolean_t	pltype;
	int		plevel;
	uint64_t	plbt[MAXPLFRAMES];
} plrecord_t;

/*
 * Per-cpu data.
 *
 * Each processor has a per-cpu data area which is dereferenced through the
 * current_cpu_datap() macro. For speed, the %gs segment is based here, and
 * using this, inline functions provide single-instruction access to frequently
 * used members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
 * current_thread().
 *
 * CPU data owned by another processor can be accessed using the
 * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
 * pointers.
 */
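/*
 * Illustrative sketch (not part of this interface): code on the local CPU
 * typically goes through the %gs-relative accessors defined below, e.g.
 *
 *	if (get_preemption_level() == 0) { ... }
 *
 * while code examining another processor's state indexes the array of
 * per-cpu pointers, e.g.
 *
 *	ast_t pending = cpu_datap(other_cpu)->cpu_pending_ast;
 *
 * where 'other_cpu' is a hypothetical logical CPU number known to the caller.
 */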
typedef struct {
	pcid_t			cpu_pcid_free_hint;
#define	PMAP_PCID_MAX_PCID	(0x800)
	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
} pcid_cdata_t;

typedef struct cpu_data
{
	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
#define	cpu_pd cpu_pal_data				/* convenience alias */
	struct cpu_data		*cpu_this;		/* pointer to myself */
	thread_t		cpu_active_thread;
	thread_t		cpu_nthread;
	volatile int		cpu_preemption_level;
	int			cpu_number;		/* Logical CPU */
	void			*cpu_int_state;		/* interrupt state */
	vm_offset_t		cpu_active_stack;	/* kernel stack base */
	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
	vm_offset_t		cpu_int_stack_top;
	int			cpu_interrupt_level;
	volatile int		cpu_signals;		/* IPI events */
	volatile int		cpu_prior_signals;	/* Last set of events,
							 * debugging
							 */
	ast_t			cpu_pending_ast;
	volatile int		cpu_running;
#if !MONOTONIC
	boolean_t		cpu_fixed_pmcs_enabled;
#endif /* !MONOTONIC */
	rtclock_timer_t		rtclock_timer;
	uint64_t		quantum_timer_deadline;
	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
	union {
		volatile uint32_t cpu_tlb_invalid;
		struct {
			volatile uint16_t cpu_tlb_invalid_local;
			volatile uint16_t cpu_tlb_invalid_global;
		};
	};
	volatile task_map_t	cpu_task_map;
	volatile addr64_t	cpu_task_cr3;
	addr64_t		cpu_kernel_cr3;
	volatile addr64_t	cpu_ucr3;
	boolean_t		cpu_pagezero_mapped;
	cpu_uber_t		cpu_uber;
	/* Double-mapped per-CPU exception stack address */
	uintptr_t		cd_estack;
	/* Address of shadowed, partially mirrored CPU data structures located
	 * in the double mapped PML4
	 */
	void			*cd_shadow;
	struct processor	*cpu_processor;
#if NCOPY_WINDOWS > 0
	struct cpu_pmap		*cpu_pmap;
#endif
	struct real_descriptor	*cpu_ldtp;
	struct cpu_desc_table	*cpu_desc_tablep;
	cpu_desc_index_t	cpu_desc_index;
	int			cpu_ldt;
#if NCOPY_WINDOWS > 0
	vm_offset_t		cpu_copywindow_base;
	uint64_t		*cpu_copywindow_pdp;

	vm_offset_t		cpu_physwindow_base;
	uint64_t		*cpu_physwindow_ptep;
#endif

#define HWINTCNT_SIZE 256
	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
	uint64_t		cpu_hwIntpexits[HWINTCNT_SIZE];
	uint64_t		cpu_dr7;		/* debug control register */
	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
	pal_rtc_nanotime_t	*cpu_nanotime;		/* Nanotime info */
#if KPC
	/* double-buffered performance counter data */
	uint64_t		*cpu_kpc_buf[2];
	/* PMC shadow and reload value buffers */
	uint64_t		*cpu_kpc_shadow;
	uint64_t		*cpu_kpc_reload;
#endif
#if MONOTONIC
	struct mt_cpu		cpu_monotonic;
#endif /* MONOTONIC */
	uint32_t		cpu_pmap_pcid_enabled;
	pcid_t			cpu_active_pcid;
	pcid_t			cpu_last_pcid;
	pcid_t			cpu_kernel_pcid;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
	pcid_cdata_t		*cpu_pcid_data;
#ifdef	PCID_STATS
	uint64_t		cpu_pmap_pcid_flushes;
	uint64_t		cpu_pmap_pcid_preserves;
#endif
	uint64_t		cpu_aperf;
	uint64_t		cpu_mperf;
	uint64_t		cpu_c3res;
	uint64_t		cpu_c6res;
	uint64_t		cpu_c7res;
	uint64_t		cpu_itime_total;
	uint64_t		cpu_rtime_total;
	uint64_t		cpu_ixtime;
	uint64_t		cpu_idle_exits;
	uint64_t		cpu_rtimes[CPU_RTIME_BINS];
	uint64_t		cpu_itimes[CPU_ITIME_BINS];
#if !MONOTONIC
	uint64_t		cpu_cur_insns;
	uint64_t		cpu_cur_ucc;
	uint64_t		cpu_cur_urc;
#endif /* !MONOTONIC */
	uint64_t		cpu_gpmcs[4];
	uint64_t		cpu_max_observed_int_latency;
	int			cpu_max_observed_int_latency_vector;
	volatile boolean_t	cpu_NMI_acknowledged;
	uint64_t		debugger_entry_time;
	uint64_t		debugger_ipi_time;
	/* A separate nested interrupt stack flag, to account
	 * for non-nested interrupts arriving while on the interrupt stack
	 * Currently only occurs when AICPM enables interrupts on the
	 * interrupt stack during processor offlining.
	 */
	uint32_t		cpu_nested_istack;
	uint32_t		cpu_nested_istack_events;
	x86_saved_state64_t	*cpu_fatal_trap_state;
	x86_saved_state64_t	*cpu_post_fatal_trap_state;
#if CONFIG_VMX
	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
#endif
#if CONFIG_MCA
	struct mca_state	*cpu_mca_state;		/* State at MC fault */
#endif
	struct prngContext	*cpu_prng;		/* PRNG's context */
	int			cpu_type;
	int			cpu_subtype;
	int			cpu_threadtype;
	boolean_t		cpu_iflag;
	boolean_t		cpu_boot_complete;
	int			cpu_hibernate;
#define MAX_PREEMPTION_RECORDS (8)
#if	DEVELOPMENT || DEBUG
	int			cpu_plri;
	plrecord_t		plrecords[MAX_PREEMPTION_RECORDS];
#endif
	void			*cpu_console_buf;
	struct x86_lcpu		lcpu;
	int			cpu_phys_number;	/* Physical CPU */
	cpu_id_t		cpu_id;			/* Platform Expert */
#if DEBUG
	uint64_t		cpu_entry_cr3;
	uint64_t		cpu_exit_cr3;
	uint64_t		cpu_pcid_last_cr3;
#endif
} cpu_data_t;

extern cpu_data_t	*cpu_data_ptr[];

/* Macro to generate inline bodies to retrieve per-cpu data fields. */
#if defined(__clang__)
#define GS_RELATIVE volatile __attribute__((address_space(256)))
#ifndef offsetof
#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER)
#endif

#define CPU_DATA_GET(member,type)					\
	cpu_data_t GS_RELATIVE *cpu_data =				\
		(cpu_data_t GS_RELATIVE *)0UL;				\
	type ret;							\
	ret = cpu_data->member;						\
	return ret;

#define CPU_DATA_GET_INDEX(member,index,type)				\
	cpu_data_t GS_RELATIVE *cpu_data =				\
		(cpu_data_t GS_RELATIVE *)0UL;				\
	type ret;							\
	ret = cpu_data->member[index];					\
	return ret;

#define CPU_DATA_SET(member,value)					\
	cpu_data_t GS_RELATIVE *cpu_data =				\
		(cpu_data_t GS_RELATIVE *)0UL;				\
	cpu_data->member = value;

#define CPU_DATA_XCHG(member,value,type)				\
	cpu_data_t GS_RELATIVE *cpu_data =				\
		(cpu_data_t GS_RELATIVE *)0UL;				\
	type ret;							\
	ret = cpu_data->member;						\
	cpu_data->member = value;					\
	return ret;

#else /* !defined(__clang__) */

#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
#define CPU_DATA_GET(member,type)					\
	type ret;							\
	__asm__ volatile ("mov %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)));			\
	return ret;

#define CPU_DATA_GET_INDEX(member,index,type)				\
	type ret;							\
	__asm__ volatile ("mov %%gs:(%1),%0"				\
		: "=r" (ret)						\
		: "r" (offsetof(cpu_data_t,member[index])));		\
	return ret;

#define CPU_DATA_SET(member,value)					\
	__asm__ volatile ("mov %0,%%gs:%P1"				\
		:							\
		: "r" (value), "i" (offsetof(cpu_data_t,member)));

#define CPU_DATA_XCHG(member,value,type)				\
	type ret;							\
	__asm__ volatile ("xchg %0,%%gs:%P1"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,member)), "0" (value));	\
	return ret;

#endif /* !defined(__clang__) */

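/*
 * Illustrative sketch (an assumption about code generation, not a guarantee):
 * an accessor built from CPU_DATA_GET, such as get_cpu_number() below,
 * reduces to roughly a single %gs-relative load of the form
 *
 *	mov	%gs:<offsetof(cpu_data_t, cpu_number)>, %eax
 *
 * because %gs is based at the local CPU's cpu_data_t.
 */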
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines. Everyone outside must call
 * the real thing.
 */
static inline thread_t
get_active_thread(void)
{
	CPU_DATA_GET(cpu_active_thread,thread_t)
}
#define current_thread_fast()		get_active_thread()
#define current_thread()		current_thread_fast()

#define cpu_mode_is64bit()		TRUE

static inline int
get_preemption_level(void)
{
	CPU_DATA_GET(cpu_preemption_level,int)
}
static inline int
get_interrupt_level(void)
{
	CPU_DATA_GET(cpu_interrupt_level,int)
}
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
static inline int
get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}

static inline cpu_data_t *
current_cpu_datap(void) {
	CPU_DATA_GET(cpu_this, cpu_data_t *);
}

/*
 * Facility to diagnose preemption-level imbalances, which are otherwise
 * challenging to debug. On each operation that enables or disables preemption,
 * we record a backtrace into a per-CPU ring buffer, along with the current
 * preemption level and operation type. Thus, if an imbalance is observed,
 * one can examine these per-CPU records to determine which codepath failed
 * to re-enable preemption, enabled preemption without a corresponding
 * disablement, etc. The backtracer determines which stack is currently active,
 * and uses that to perform bounds checks on unterminated stacks.
 * To enable, sysctl -w machdep.pltrace=1 on DEVELOPMENT or DEBUG kernels (DRK '15).
 * The bounds check currently doesn't account for non-default thread stack sizes.
 */
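/*
 * Illustrative note (an interpretation drawn from the code below, not a
 * documented interface): in each plrecord_t, pltype is TRUE for an enable
 * operation and FALSE for a disable, plevel is the preemption level observed
 * when the record was taken, and plbt[] holds the captured return addresses,
 * with plbt[0] being the program counter inside pltrace_internal() itself.
 */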
#if DEVELOPMENT || DEBUG
static inline void pltrace_bt(uint64_t *rets, int maxframes, uint64_t stacklo, uint64_t stackhi) {
	uint64_t *cfp = (uint64_t *) __builtin_frame_address(0);
	int plbtf;

	assert(stacklo != 0 && stackhi != 0);

	for (plbtf = 0; plbtf < maxframes; plbtf++) {
		if (((uint64_t)cfp == 0) || (((uint64_t)cfp < stacklo) || ((uint64_t)cfp > stackhi))) {
			rets[plbtf] = 0;
			continue;
		}
		rets[plbtf] = *(cfp + 1);
		cfp = (uint64_t *) (*cfp);
	}
}


extern uint32_t low_intstack[];		/* bottom */
extern uint32_t low_eintstack[];	/* top */
extern char mp_slave_stack[PAGE_SIZE];

static inline void pltrace_internal(boolean_t enable) {
	cpu_data_t *cdata = current_cpu_datap();
	int cpli = cdata->cpu_preemption_level;
	int cplrecord = cdata->cpu_plri;
	uint64_t kstackb, kstackt, *plbts;

	assert(cpli >= 0);

	cdata->plrecords[cplrecord].pltype = enable;
	cdata->plrecords[cplrecord].plevel = cpli;

	plbts = &cdata->plrecords[cplrecord].plbt[0];

	cplrecord++;

	if (cplrecord >= MAX_PREEMPTION_RECORDS) {
		cplrecord = 0;
	}

	cdata->cpu_plri = cplrecord;
	/* Obtain the 'current' program counter, initial backtrace
	 * element. This will also indicate if we were unable to
	 * trace further up the stack for some reason
	 */
	__asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
	    : "=m" (plbts[0])
	    :
	    : "rax");


	thread_t cplthread = cdata->cpu_active_thread;
	if (cplthread) {
		uintptr_t csp;
		__asm__ __volatile__ ("movq %%rsp, %0": "=r" (csp):);
		/* Determine which stack we're on to populate stack bounds.
		 * We don't need to trace across stack boundaries for this
		 * routine.
		 */
		kstackb = cdata->cpu_active_stack;
		kstackt = kstackb + KERNEL_STACK_SIZE;
		if (csp < kstackb || csp > kstackt) {
			kstackt = cdata->cpu_kernel_stack;
			kstackb = kstackb - KERNEL_STACK_SIZE;
			if (csp < kstackb || csp > kstackt) {
				kstackt = cdata->cpu_int_stack_top;
				kstackb = kstackt - INTSTACK_SIZE;
				if (csp < kstackb || csp > kstackt) {
					kstackt = (uintptr_t)low_eintstack;
					kstackb = (uintptr_t)low_eintstack - INTSTACK_SIZE;
					if (csp < kstackb || csp > kstackt) {
						kstackb = (uintptr_t) mp_slave_stack;
						kstackt = (uintptr_t) mp_slave_stack + PAGE_SIZE;
					}
				}
			}
		}

		if (kstackb) {
			pltrace_bt(&plbts[1], MAXPLFRAMES - 1, kstackb, kstackt);
		}
	}
}

extern int plctrace_enabled;
#endif /* DEVELOPMENT || DEBUG */

static inline void pltrace(boolean_t plenable) {
#if DEVELOPMENT || DEBUG
	if (__improbable(plctrace_enabled != 0)) {
		pltrace_internal(plenable);
	}
#else
	(void)plenable;
#endif
}

static inline void
disable_preemption_internal(void) {
	assert(get_preemption_level() >= 0);

#if defined(__clang__)
	cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL;
	cpu_data->cpu_preemption_level++;
#else
	__asm__ volatile ("incl %%gs:%P0"
	    :
	    : "i" (offsetof(cpu_data_t, cpu_preemption_level)));
#endif
	pltrace(FALSE);
}

static inline void
enable_preemption_internal(void) {
	assert(get_preemption_level() > 0);
	pltrace(TRUE);
#if defined(__clang__)
	cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL;
	if (0 == --cpu_data->cpu_preemption_level)
		kernel_preempt_check();
#else
	__asm__ volatile ("decl %%gs:%P0		\n\t"
			  "jne 1f			\n\t"
			  "call _kernel_preempt_check	\n\t"
			  "1:"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "eax", "ecx", "edx", "cc", "memory");
#endif
}

static inline void
enable_preemption_no_check(void)
{
	assert(get_preemption_level() > 0);

	pltrace(TRUE);
#if defined(__clang__)
	cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL;
	cpu_data->cpu_preemption_level--;
#else
	__asm__ volatile ("decl %%gs:%P0"
			: /* no outputs */
			: "i" (offsetof(cpu_data_t, cpu_preemption_level))
			: "cc", "memory");
#endif
}

static inline void
_enable_preemption_no_check(void) {
	enable_preemption_no_check();
}

static inline void
mp_disable_preemption(void)
{
	disable_preemption_internal();
}

static inline void
_mp_disable_preemption(void)
{
	disable_preemption_internal();
}

static inline void
mp_enable_preemption(void)
{
	enable_preemption_internal();
}

static inline void
_mp_enable_preemption(void) {
	enable_preemption_internal();
}

static inline void
mp_enable_preemption_no_check(void) {
	enable_preemption_no_check();
}

static inline void
_mp_enable_preemption_no_check(void) {
	enable_preemption_no_check();
}

#ifdef XNU_KERNEL_PRIVATE
#define disable_preemption() disable_preemption_internal()
#define enable_preemption() enable_preemption_internal()
#define MACHINE_PREEMPTION_MACROS (1)
#endif
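
/*
 * Illustrative sketch (not part of this header): callers are expected to
 * keep these operations balanced, e.g.
 *
 *	disable_preemption();
 *	... access per-CPU state via current_cpu_datap() ...
 *	enable_preemption();
 *
 * An imbalance can be diagnosed with the pltrace facility above on
 * DEVELOPMENT or DEBUG kernels.
 */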

static inline cpu_data_t *
cpu_datap(int cpu) {
	return cpu_data_ptr[cpu];
}

static inline int
cpu_is_running(int cpu) {
	return ((cpu_datap(cpu) != NULL) && (cpu_datap(cpu)->cpu_running));
}

#ifdef MACH_KERNEL_PRIVATE
static inline cpu_data_t *
cpu_shadowp(int cpu) {
	return cpu_data_ptr[cpu]->cd_shadow;
}

#endif
extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);
extern void cpu_data_realloc(void);

#endif	/* I386_CPU_DATA */