/* Source: osfmk/i386/cpu_data.h, from the xnu-517.9.5 release. */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 *
25 */
26
27 #ifndef I386_CPU_DATA
28 #define I386_CPU_DATA
29
30 #include <cpus.h>
31 #include <mach_assert.h>
32
33 #if defined(__GNUC__)
34
35 #include <kern/assert.h>
36 #include <kern/kern_types.h>
37 #include <pexpert/pexpert.h>
38
/*
 * Per-processor data, one instance per CPU (see the cpu_data[] array
 * declared below).  The inline accessors in this file read these fields
 * via %gs-relative addressing, so each field's byte offset within this
 * struct is part of the kernel's low-level ABI: do not reorder, resize,
 * or insert fields without auditing every offsetof()/%gs user.
 */
typedef struct
{
	thread_act_t	*active_thread;		/* thread activation currently running on this CPU */
	int		preemption_level;	/* nesting count; > 0 means preemption disabled */
	int		simple_lock_count;	/* presumably simple locks held by this CPU — see get_simple_lock_count() */
	int		interrupt_level;	/* presumably interrupt nesting depth — see get_interrupt_level() */
	int		cpu_number;		/* Logical CPU number */
	int		cpu_phys_number;	/* Physical CPU Number */
	cpu_id_t	cpu_id;			/* Platform Expert handle */
	int		cpu_status;		/* Boot Status */
	int		cpu_signals;		/* IPI events */
	int		mcount_off;		/* mcount recursion flag */
} cpu_data_t;
52
53 extern cpu_data_t cpu_data[NCPUS];
54
/* Macro to generate inline bodies to retrieve per-cpu data fields. */
/*
 * Guard the hand-rolled offsetof: <stddef.h> also defines it, and an
 * unconditional redefinition with a different replacement list is a
 * preprocessor constraint violation if both are visible.
 */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif /* offsetof */
/*
 * Expand to the body of an accessor returning cpu_data[my_cpu].field:
 * a single %gs-relative load, where %gs is assumed to be based at this
 * CPU's cpu_data entry.  The "i" (immediate) constraint with %P1 emits
 * the field offset as a bare displacement.
 */
#define CPU_DATA_GET(field,type)					\
	type ret;							\
	__asm__ volatile ("movl %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,field)));			\
	return ret;
63
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
/* Return the thread activation currently running on this CPU
 * (one %gs-relative load of cpu_data.active_thread). */
extern thread_act_t __inline__ get_active_thread(void)
{
	CPU_DATA_GET(active_thread,thread_act_t)
}
#define	current_act_fast()	get_active_thread()
#define	current_act()		current_act_fast()
#define current_thread()	current_act_fast()->thread
76
/* Return this CPU's preemption-disable nesting count (0 = preemptible). */
extern int __inline__ get_preemption_level(void)
{
	CPU_DATA_GET(preemption_level,int)
}
/* Return this CPU's simple_lock_count field — presumably the number of
 * simple locks currently held on this CPU; confirm against the lock code. */
extern int __inline__ get_simple_lock_count(void)
{
	CPU_DATA_GET(simple_lock_count,int)
}
/* Return this CPU's interrupt_level field — presumably the interrupt
 * nesting depth; confirm against the trap/interrupt entry code. */
extern int __inline__ get_interrupt_level(void)
{
	CPU_DATA_GET(interrupt_level,int)
}
/* Return this CPU's logical CPU number. */
extern int __inline__ get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/* Return this CPU's physical CPU number. */
extern int __inline__ get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
97
98 extern void __inline__ disable_preemption(void)
99 {
100 register int idx = (int)&((cpu_data_t *)0)->preemption_level;
101
102 __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx));
103 }
104
/*
 * Re-enable preemption on this CPU: decrement the %gs-relative
 * preemption_level nesting count and, if it has dropped to zero
 * (decl sets ZF, so `jne 1f` skips the call while the count is
 * still non-zero), call kernel_preempt_check() to take any pending
 * preemption.  Must be balanced with a prior disable_preemption().
 *
 * %eax/%ecx/%edx are listed as clobbers because the embedded `call`
 * follows the C calling convention, which lets the callee trash the
 * caller-saved registers.
 */
extern void __inline__ enable_preemption(void)
{
	extern void		kernel_preempt_check (void);
	register int		idx = (int)&((cpu_data_t *)0)->preemption_level;
	register void (*kpc)(void)=	kernel_preempt_check;

	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:(%0); jne 1f; \
			call %1; 1:"
			: /* no outputs */
			: "r" (idx), "r" (kpc)
			: "%eax", "%ecx", "%edx", "cc", "memory");
}
119
/*
 * Re-enable preemption on this CPU by decrementing the %gs-relative
 * preemption_level nesting count, WITHOUT checking for a pending
 * preemption when the count reaches zero (contrast enable_preemption()
 * above).  Must be balanced with a prior disable_preemption().
 */
extern void __inline__ enable_preemption_no_check(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:(%0)"
			: /* no outputs */
			: "r" (idx)
			: "cc", "memory");
}
131
/*
 * Multiprocessor-only preemption disable: a no-op when the kernel is
 * configured uniprocessor (NCPUS == 1), otherwise identical to
 * disable_preemption().
 */
extern void __inline__ mp_disable_preemption(void)
{
#if	NCPUS > 1
	disable_preemption();
#endif	/* NCPUS > 1 */
}
138
/*
 * Multiprocessor-only preemption enable: a no-op when the kernel is
 * configured uniprocessor (NCPUS == 1), otherwise identical to
 * enable_preemption() (including the pending-preemption check).
 */
extern void __inline__ mp_enable_preemption(void)
{
#if	NCPUS > 1
	enable_preemption();
#endif	/* NCPUS > 1 */
}
145
/*
 * Multiprocessor-only variant of enable_preemption_no_check(): a no-op
 * when the kernel is configured uniprocessor (NCPUS == 1), otherwise
 * decrements the preemption level without checking for pending
 * preemption.
 */
extern void __inline__ mp_enable_preemption_no_check(void)
{
#if	NCPUS > 1
	enable_preemption_no_check();
#endif	/* NCPUS > 1 */
}
152
153 #if 0
154 #ifndef __OPTIMIZE__
155 #undef extern
156 #endif
157 #endif
158
159 #else /* !defined(__GNUC__) */
160
161 #endif /* defined(__GNUC__) */
162
163 #endif /* I386_CPU_DATA */