/* osfmk/i386/cpu_data.h (apple/xnu, git.saurik.com mirror, blob a62fc6170a02d380ec3d6e8ba4d3b2755aa2a152) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef I386_CPU_DATA
#define I386_CPU_DATA

#include <cpus.h>
#include <mach_assert.h>

#if defined(__GNUC__)

#include <kern/assert.h>
#include <kern/kern_types.h>
extern cpu_data_t cpu_data[NCPUS];

#define get_cpu_data()  &cpu_data[cpu_number()]
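/*
 * Each CPU owns one slot of the cpu_data[] array above, and
 * get_cpu_data() selects the current CPU's slot by index.  The inline
 * accessors below take a faster path: the %gs segment is set up to map
 * the current CPU's cpu_data_t, so a field can be read or updated with
 * a single %gs-relative instruction, without computing cpu_number().
 */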
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
extern thread_t __inline__ current_thread_fast(void);
extern thread_t __inline__ current_thread_fast(void)
{
        register thread_t       ct;
        register int            idx = (int)&((cpu_data_t *)0)->active_thread;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx));

        return (ct);
}

#define current_thread()        current_thread_fast()
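/*
 * Note on the idiom above: `(int)&((cpu_data_t *)0)->active_thread'
 * computes the byte offset of the field within cpu_data_t (the classic
 * hand-rolled offsetof), and the movl then loads from %gs:offset,
 * i.e. from the per-CPU data block addressed through %gs.
 */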
extern int  __inline__  get_preemption_level(void);
extern void __inline__  disable_preemption(void);
extern void __inline__  enable_preemption(void);
extern void __inline__  enable_preemption_no_check(void);
extern void __inline__  mp_disable_preemption(void);
extern void __inline__  mp_enable_preemption(void);
extern void __inline__  mp_enable_preemption_no_check(void);
extern int  __inline__  get_simple_lock_count(void);
extern int  __inline__  get_interrupt_level(void);
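/*
 * Usage sketch (not part of the original header): a hypothetical caller
 * bracketing a per-CPU update with the preemption controls declared
 * above.  The function name and the `hypothetical_stat' field are
 * illustrative assumptions, not real cpu_data_t members.
 */
#if 0
static void
example_percpu_update(void)
{
        cpu_data_t      *cdp;

        disable_preemption();           /* keep this thread on the current CPU */
        cdp = get_cpu_data();           /* &cpu_data[cpu_number()] */
        cdp->hypothetical_stat++;       /* illustrative field only */
        enable_preemption();            /* may invoke kernel_preempt_check() */
}
#endif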
extern int __inline__           get_preemption_level(void)
{
        register int    pl;
        register int    idx = (int)&((cpu_data_t *)0)->preemption_level;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}
extern void __inline__          disable_preemption(void)
{
#if     MACH_ASSERT
        extern void _disable_preemption(void);

        _disable_preemption();
#else   /* MACH_ASSERT */
        register int    idx = (int)&((cpu_data_t *)0)->preemption_level;

        __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx));
#endif  /* MACH_ASSERT */
}
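/*
 * On MACH_ASSERT (debug) kernels the out-of-line _disable_preemption()
 * is called so the counter can be sanity-checked; production kernels
 * bump the per-CPU preemption_level in place with one %gs-relative incl.
 */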
extern void __inline__          enable_preemption(void)
{
#if     MACH_ASSERT
        extern void _enable_preemption(void);

        assert(get_preemption_level() > 0);
        _enable_preemption();
#else   /* MACH_ASSERT */
        extern void     kernel_preempt_check(void);
        register int    idx = (int)&((cpu_data_t *)0)->preemption_level;
        register void   (*kpc)(void) = kernel_preempt_check;

        __asm__ volatile ("decl %%gs:(%0); jne 1f; \
                        call *%1; 1:"
                        : /* no outputs */
                        : "r" (idx), "r" (kpc)
                        : "%eax", "%ecx", "%edx", "cc", "memory");
#endif  /* MACH_ASSERT */
}
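/*
 * The decl/jne pair above decrements the per-CPU preemption_level and
 * skips to label 1 while the count is still nonzero; only when it
 * drops to zero does execution fall through and call
 * kernel_preempt_check() to take any pending preemption.  The call is
 * why %eax, %ecx and %edx (the i386 caller-saved registers) sit in the
 * clobber list.
 */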
extern void __inline__          enable_preemption_no_check(void)
{
#if     MACH_ASSERT
        extern void _enable_preemption_no_check(void);

        assert(get_preemption_level() > 0);
        _enable_preemption_no_check();
#else   /* MACH_ASSERT */
        register int    idx = (int)&((cpu_data_t *)0)->preemption_level;

        __asm__ volatile ("decl %%gs:(%0)"
                        : /* no outputs */
                        : "r" (idx)
                        : "cc", "memory");
#endif  /* MACH_ASSERT */
}
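/*
 * Unlike enable_preemption(), this variant only drops the count and
 * never calls kernel_preempt_check(), hence the "no_check" suffix; a
 * pending preemption stays pending until the next check point.
 */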
extern void __inline__          mp_disable_preemption(void)
{
#if     NCPUS > 1
        disable_preemption();
#endif  /* NCPUS > 1 */
}
extern void __inline__          mp_enable_preemption(void)
{
#if     NCPUS > 1
        enable_preemption();
#endif  /* NCPUS > 1 */
}
extern void __inline__          mp_enable_preemption_no_check(void)
{
#if     NCPUS > 1
        enable_preemption_no_check();
#endif  /* NCPUS > 1 */
}
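/*
 * The mp_ wrappers compile away when NCPUS == 1: code that only needs
 * preemption protection on multiprocessor kernels can use these and
 * cost nothing on a uniprocessor build.
 */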
extern int __inline__           get_simple_lock_count(void)
{
        register int    pl;
        register int    idx = (int)&((cpu_data_t *)0)->simple_lock_count;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}
extern int __inline__           get_interrupt_level(void)
{
        register int    pl;
        register int    idx = (int)&((cpu_data_t *)0)->interrupt_level;

        __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

        return (pl);
}
#else   /* !defined(__GNUC__) */

#endif  /* defined(__GNUC__) */

#endif  /* I386_CPU_DATA */