/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef	I386_CPU_DATA
#define	I386_CPU_DATA

#include <cpus.h>
#include <mach_assert.h>

#if defined(__GNUC__)

#include <kern/assert.h>
#include <kern/kern_types.h>

#if 0
#ifndef	__OPTIMIZE__
#define extern static
#endif
#endif

extern cpu_data_t	cpu_data[NCPUS];

#define	get_cpu_data()	&cpu_data[cpu_number()]
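
/*
 * Illustrative sketch: get_cpu_data() yields a pointer to the calling
 * processor's cpu_data[] slot, so per-CPU fields can be read through it
 * directly.  The example_* name below is hypothetical, not part of xnu.
 */
#if 0
static int example_my_preemption_level(void)
{
	cpu_data_t	*my_cpu = get_cpu_data();	/* this CPU's slot */

	return (my_cpu->preemption_level);
}
#endif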

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
extern thread_t __inline__ current_thread_fast(void);
extern thread_t __inline__ current_thread_fast(void)
{
	register thread_t	ct;
	register int		idx = (int)&((cpu_data_t *)0)->active_thread;

	__asm__ volatile (" movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx));

	return (ct);
}
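
/*
 * Sketch of what the inline above relies on: the %gs segment base is set
 * up per processor to point at that CPU's cpu_data entry, so
 * "movl %%gs:(offset)" reads a per-CPU field in one instruction.  The
 * slower, plain-C equivalent would be the following (example_* name
 * hypothetical):
 */
#if 0
static thread_t example_current_thread_slow(void)
{
	/* recomputes cpu_number() on every call */
	return (cpu_data[cpu_number()].active_thread);
}
#endif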

#define current_thread()	current_thread_fast()

extern int __inline__	get_preemption_level(void);
extern void __inline__	disable_preemption(void);
extern void __inline__	enable_preemption(void);
extern void __inline__	enable_preemption_no_check(void);
extern void __inline__	mp_disable_preemption(void);
extern void __inline__	mp_enable_preemption(void);
extern void __inline__	mp_enable_preemption_no_check(void);
extern int __inline__	get_simple_lock_count(void);
extern int __inline__	get_interrupt_level(void);

extern int __inline__ get_preemption_level(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;
	register int	pl;

	__asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}
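
/*
 * Usage sketch: a typical consumer asserts that it is safe to block,
 * since sleeping with preemption disabled could hang the processor.
 * The example_* name is hypothetical.
 */
#if 0
static void example_assert_preemptible(void)
{
	assert(get_preemption_level() == 0);
}
#endif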

extern void __inline__ disable_preemption(void)
{
#if	MACH_ASSERT
	extern void _disable_preemption(void);

	_disable_preemption();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile (" incl %%gs:(%0)" : : "r" (idx));
#endif	/* MACH_ASSERT */
}

extern void __inline__ enable_preemption(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption(void);

	assert(get_preemption_level() > 0);
	_enable_preemption();
#else	/* MACH_ASSERT */
	extern void kernel_preempt_check(void);
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;
	register void	(*kpc)(void) = kernel_preempt_check;

	/* drop the count; call kernel_preempt_check() only when it hits 0 */
	__asm__ volatile ("decl %%gs:(%0); jne 1f; \
			call *%1; 1:"
		: /* no outputs */
		: "r" (idx), "r" (kpc)
		: "%eax", "%ecx", "%edx", "cc", "memory");
#endif	/* MACH_ASSERT */
}
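
/*
 * Sketch of the logic the asm above implements: drop the per-CPU
 * preemption count and, when it reaches zero, let any pending preemption
 * take effect.  The real sequence touches the counter through %gs so the
 * CPU cannot change out from under it; the plain-C form below has no
 * such guarantee and is only illustrative (example_* name hypothetical).
 */
#if 0
static void example_enable_preemption_logic(void)
{
	extern void	kernel_preempt_check(void);
	cpu_data_t	*my_cpu = get_cpu_data();

	if (--my_cpu->preemption_level == 0)
		kernel_preempt_check();	/* take a pending preemption now */
}
#endif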

extern void __inline__ enable_preemption_no_check(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption_no_check(void);

	assert(get_preemption_level() > 0);
	_enable_preemption_no_check();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("decl %%gs:(%0)"
		: /* no outputs */
		: "r" (idx)
		: "cc", "memory");
#endif	/* MACH_ASSERT */
}

extern void __inline__ mp_disable_preemption(void)
{
#if	NCPUS > 1
	disable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption(void)
{
#if	NCPUS > 1
	enable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption_no_check(void)
{
#if	NCPUS > 1
	enable_preemption_no_check();
#endif	/* NCPUS > 1 */
}

extern int __inline__ get_simple_lock_count(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->simple_lock_count;
	register int	pl;

	__asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

extern int __inline__ get_interrupt_level(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->interrupt_level;
	register int	pl;

	__asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}
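
/*
 * Pairing sketch: disabling preemption pins the thread to its processor,
 * so reads of several per-CPU fields stay mutually consistent.  The
 * example_* name is hypothetical.
 */
#if 0
static void example_snapshot_cpu_state(int *locks, int *intr)
{
	disable_preemption();
	*locks = get_simple_lock_count();	/* same CPU for both reads */
	*intr = get_interrupt_level();
	enable_preemption();
}
#endif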

#if 0
#ifndef	__OPTIMIZE__
#undef	extern
#endif
#endif

#else	/* !defined(__GNUC__) */

#endif	/* defined(__GNUC__) */

#endif	/* I386_CPU_DATA */