/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef	I386_CPU_DATA
#define	I386_CPU_DATA

#include <cpus.h>		/* NCPUS */
#include <mach_assert.h>

#if	defined(__GNUC__)

#include <kern/assert.h>
#include <kern/kern_types.h>

/*
 * Per-CPU data record.  Only the fields touched by the inline
 * accessors in this header are listed; the original declaration
 * carries further per-processor state not shown here.
 */
typedef struct {
	thread_t	active_thread;		/* thread running on this CPU */
	int		preemption_level;	/* preemption nesting count */
	int		simple_lock_count;	/* simple locks currently held */
	int		interrupt_level;	/* interrupt nesting count */
} cpu_data_t;

extern cpu_data_t	cpu_data[NCPUS];

#define	get_cpu_data()	&cpu_data[cpu_number()]
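
/*
 * The inline routines below fetch and update these fields with a
 * single %gs-relative instruction: each processor's %gs segment is
 * based at its own cpu_data[] entry.  The idiom
 * (int)&((cpu_data_t *)0)->field is a hand-rolled offsetof(),
 * yielding the byte offset of the field within cpu_data_t.
 */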

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
extern thread_t __inline__	current_thread_fast(void);
extern thread_t __inline__	current_thread_fast(void)
{
	register thread_t	ct;
	register int		idx = (int)&((cpu_data_t *)0)->active_thread;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx));

	return (ct);
}

#define	current_thread()	current_thread_fast()
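
/*
 * Illustrative note (not part of the original header): within osfmk
 * the macro above makes the fast path transparent, e.g.
 *
 *	thread_t self = current_thread();
 *
 * which compiles to a single %gs-relative load.  Code outside osfmk
 * links against the real current_thread() function instead.
 */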

extern int	__inline__	get_preemption_level(void);
extern void	__inline__	disable_preemption(void);
extern void	__inline__	enable_preemption(void);
extern void	__inline__	enable_preemption_no_check(void);
extern void	__inline__	mp_disable_preemption(void);
extern void	__inline__	mp_enable_preemption(void);
extern void	__inline__	mp_enable_preemption_no_check(void);
extern int	__inline__	get_simple_lock_count(void);
extern int	__inline__	get_interrupt_level(void);

extern int __inline__		get_preemption_level(void)
{
	register int	pl;
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

extern void __inline__		disable_preemption(void)
{
#if	MACH_ASSERT
	extern void _disable_preemption(void);

	_disable_preemption();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("	incl %%gs:(%0)" : : "r" (idx));
#endif	/* MACH_ASSERT */
}
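
/*
 * Note: with MACH_ASSERT the out-of-line _disable_preemption() is
 * called so the debug kernel can apply extra checking; the
 * production path is just one incl on the per-CPU count.
 */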

extern void __inline__		enable_preemption(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption(void);

	assert(get_preemption_level() > 0);
	_enable_preemption();
#else	/* MACH_ASSERT */
	extern void	kernel_preempt_check(void);
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;
	register void	(*kpc)(void) = kernel_preempt_check;

	__asm__ volatile ("decl %%gs:(%0); jne 1f; \
			call %1; 1:"
			: /* no outputs */
			: "r" (idx), "r" (kpc)
			: "%eax", "%ecx", "%edx", "cc", "memory");
#endif	/* MACH_ASSERT */
}
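
/*
 * In the non-assert path above, decl lowers the per-CPU
 * preemption_level; if the result is nonzero, jne skips to label 1
 * and nothing more happens.  When the count reaches zero the branch
 * falls through and kernel_preempt_check() runs any preemption that
 * was deferred while preemption was disabled.  %eax, %ecx and %edx
 * are clobbered because the call obeys the C calling convention,
 * under which they are caller-saved.
 */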

extern void __inline__		enable_preemption_no_check(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption_no_check(void);

	assert(get_preemption_level() > 0);
	_enable_preemption_no_check();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("decl %%gs:(%0)"
			: /* no outputs */
			: "r" (idx)
			: "cc", "memory");
#endif	/* MACH_ASSERT */
}
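
/*
 * The no_check variant only decrements the count: unlike
 * enable_preemption() it never calls kernel_preempt_check(), so any
 * pending preemption waits for a later preemption point.
 */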

extern void __inline__		mp_disable_preemption(void)
{
#if	NCPUS > 1
	disable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__		mp_enable_preemption(void)
{
#if	NCPUS > 1
	enable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__		mp_enable_preemption_no_check(void)
{
#if	NCPUS > 1
	enable_preemption_no_check();
#endif	/* NCPUS > 1 */
}
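
/*
 * On uniprocessor configurations (NCPUS == 1) the bodies of the mp_
 * variants above are compiled out, making them no-ops.
 */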

extern int __inline__		get_simple_lock_count(void)
{
	register int	pl;
	register int	idx = (int)&((cpu_data_t *)0)->simple_lock_count;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

extern int __inline__		get_interrupt_level(void)
{
	register int	pl;
	register int	idx = (int)&((cpu_data_t *)0)->interrupt_level;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

#else	/* !defined(__GNUC__) */

#endif	/* defined(__GNUC__) */

#endif	/* I386_CPU_DATA */