/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef I386_CPU_DATA
#define I386_CPU_DATA

#include <cpus.h>
#include <mach_assert.h>

#if defined(__GNUC__)

#include <kern/assert.h>
#include <kern/kern_types.h>

#if 0
#ifndef __OPTIMIZE__
#define extern static
#endif
#endif

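/*
 * One cpu_data_t per processor.  get_cpu_data() returns the current
 * CPU's entry by indexing the array with cpu_number().
 */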
extern cpu_data_t cpu_data[NCPUS];

#define get_cpu_data() &cpu_data[cpu_number()]

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
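/*
 * Each fast inline computes the byte offset of a cpu_data_t field with
 * the &((cpu_data_t *)0)->field idiom (equivalent to offsetof()) and
 * then accesses that field through the %gs segment, which addresses the
 * executing processor's cpu_data area.  This avoids the cpu_number()
 * lookup done by get_cpu_data().
 */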
extern thread_t __inline__ current_thread_fast(void);
extern thread_t __inline__ current_thread_fast(void)
{
        register thread_t ct;
        register int idx = (int)&((cpu_data_t *)0)->active_thread;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx));

        return (ct);
}

#define current_thread() current_thread_fast()

extern int __inline__ get_preemption_level(void);
extern void __inline__ disable_preemption(void);
extern void __inline__ enable_preemption(void);
extern void __inline__ enable_preemption_no_check(void);
extern void __inline__ mp_disable_preemption(void);
extern void __inline__ mp_enable_preemption(void);
extern void __inline__ mp_enable_preemption_no_check(void);
extern int __inline__ get_simple_lock_count(void);
extern int __inline__ get_interrupt_level(void);

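/*
 * Preemption is controlled by a per-CPU nesting counter,
 * cpu_data->preemption_level: disable_preemption() increments it and
 * enable_preemption() decrements it.  When MACH_ASSERT is configured,
 * the checked, out-of-line _disable_preemption()/_enable_preemption()
 * routines are called instead of the inline %gs-relative updates.
 */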
extern int __inline__ get_preemption_level(void)
{
        register int idx = (int)&((cpu_data_t *)0)->preemption_level;
        register int pl;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}

extern void __inline__ disable_preemption(void)
{
#if MACH_ASSERT
        extern void _disable_preemption(void);

        _disable_preemption();
#else /* MACH_ASSERT */
        register int idx = (int)&((cpu_data_t *)0)->preemption_level;

        __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx));
#endif /* MACH_ASSERT */
}

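/*
 * enable_preemption() decrements the per-CPU preemption_level and, when
 * the count reaches zero, calls kernel_preempt_check() so that any
 * pending preemption is taken.  Because a C function is called from the
 * inline assembly, the caller-saved registers %eax, %ecx and %edx are
 * listed as clobbered.
 */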
extern void __inline__ enable_preemption(void)
{
#if MACH_ASSERT
        extern void _enable_preemption(void);

        assert(get_preemption_level() > 0);
        _enable_preemption();
#else /* MACH_ASSERT */
        extern void kernel_preempt_check(void);
        register int idx = (int)&((cpu_data_t *)0)->preemption_level;
        register void (*kpc)(void) = kernel_preempt_check;

        __asm__ volatile ("decl %%gs:(%0); jne 1f; \
                        call %1; 1:"
                        : /* no outputs */
                        : "r" (idx), "r" (kpc)
                        : "%eax", "%ecx", "%edx", "cc", "memory");
#endif /* MACH_ASSERT */
}

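/*
 * enable_preemption_no_check() also drops the preemption_level but
 * deliberately skips the kernel_preempt_check() call, so a pending
 * preemption is not taken here.
 */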
extern void __inline__ enable_preemption_no_check(void)
{
#if MACH_ASSERT
        extern void _enable_preemption_no_check(void);

        assert(get_preemption_level() > 0);
        _enable_preemption_no_check();
#else /* MACH_ASSERT */
        register int idx = (int)&((cpu_data_t *)0)->preemption_level;

        __asm__ volatile ("decl %%gs:(%0)"
                        : /* no outputs */
                        : "r" (idx)
                        : "cc", "memory");
#endif /* MACH_ASSERT */
}

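/*
 * The mp_ variants are meaningful only on multiprocessor configurations;
 * when NCPUS == 1 they compile to empty functions.
 */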
extern void __inline__ mp_disable_preemption(void)
{
#if NCPUS > 1
        disable_preemption();
#endif /* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption(void)
{
#if NCPUS > 1
        enable_preemption();
#endif /* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption_no_check(void)
{
#if NCPUS > 1
        enable_preemption_no_check();
#endif /* NCPUS > 1 */
}

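/*
 * Readers for the remaining per-CPU counters, using the same
 * %gs-relative access as get_preemption_level().
 */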
extern int __inline__ get_simple_lock_count(void)
{
        register int idx = (int)&((cpu_data_t *)0)->simple_lock_count;
        register int pl;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}

extern int __inline__ get_interrupt_level(void)
{
        register int idx = (int)&((cpu_data_t *)0)->interrupt_level;
        register int pl;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}

#if 0
#ifndef __OPTIMIZE__
#undef extern
#endif
#endif

#else /* !defined(__GNUC__) */

#endif /* defined(__GNUC__) */

#endif /* I386_CPU_DATA */