/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef	I386_CPU_DATA
#define	I386_CPU_DATA

#include <cpus.h>
#include <mach_assert.h>

#if defined(__GNUC__)

#include <kern/assert.h>
#include <kern/kern_types.h>

#if 0
#ifndef	__OPTIMIZE__
#define extern static
#endif
#endif

/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.  (An illustrative usage sketch, disabled with
 * #if 0, follows the inline definitions below.)
 */
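/*
 * current_thread_fast() reads the active_thread field of this CPU's
 * cpu_data_t through the %gs segment; the null-pointer cast below is
 * an offsetof-style idiom for computing the field's byte offset.
 */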
extern thread_t	__inline__ current_thread_fast(void);
extern thread_t	__inline__ current_thread_fast(void)
{
	register thread_t	ct;
	register int		idx = (int)&((cpu_data_t *)0)->active_thread;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx));

	return (ct);
}

#define	current_thread()	current_thread_fast()

extern int	__inline__	get_preemption_level(void);
extern void	__inline__	disable_preemption(void);
extern void	__inline__	enable_preemption(void);
extern void	__inline__	enable_preemption_no_check(void);
extern void	__inline__	mp_disable_preemption(void);
extern void	__inline__	mp_enable_preemption(void);
extern void	__inline__	mp_enable_preemption_no_check(void);
extern int	__inline__	get_simple_lock_count(void);
extern int	__inline__	get_interrupt_level(void);

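/* Return this CPU's preemption-disable nesting level (%gs-relative load). */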
extern int __inline__ get_preemption_level(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;
	register int	pl;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

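/*
 * Disable preemption by incrementing this CPU's preemption_level.
 * With MACH_ASSERT the out-of-line _disable_preemption() is used instead.
 */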
extern void __inline__ disable_preemption(void)
{
#if	MACH_ASSERT
	extern void _disable_preemption(void);

	_disable_preemption();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("	incl %%gs:(%0)" : : "r" (idx));
#endif	/* MACH_ASSERT */
}

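/*
 * Re-enable preemption by decrementing preemption_level; when the count
 * drops back to zero, kernel_preempt_check() is called to handle any
 * preemption that became pending while it was disabled.
 */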
extern void __inline__ enable_preemption(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption(void);

	assert(get_preemption_level() > 0);
	_enable_preemption();
#else	/* MACH_ASSERT */
	extern void kernel_preempt_check(void);
	register int		idx = (int)&((cpu_data_t *)0)->preemption_level;
	register void (*kpc)(void) = kernel_preempt_check;

	__asm__ volatile ("decl %%gs:(%0); jne 1f; \
			call %1; 1:"
			: /* no outputs */
			: "r" (idx), "r" (kpc)
			: "%eax", "%ecx", "%edx", "cc", "memory");
#endif	/* MACH_ASSERT */
}

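/*
 * As enable_preemption(), but without the kernel_preempt_check() call
 * when the count reaches zero.
 */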
extern void __inline__ enable_preemption_no_check(void)
{
#if	MACH_ASSERT
	extern void _enable_preemption_no_check(void);

	assert(get_preemption_level() > 0);
	_enable_preemption_no_check();
#else	/* MACH_ASSERT */
	register int	idx = (int)&((cpu_data_t *)0)->preemption_level;

	__asm__ volatile ("decl %%gs:(%0)"
			: /* no outputs */
			: "r" (idx)
			: "cc", "memory");
#endif	/* MACH_ASSERT */
}

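/*
 * The mp_ variants compile to nothing on a uniprocessor configuration
 * (NCPUS == 1) and simply wrap the routines above otherwise.
 */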
extern void __inline__ mp_disable_preemption(void)
{
#if	NCPUS > 1
	disable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption(void)
{
#if	NCPUS > 1
	enable_preemption();
#endif	/* NCPUS > 1 */
}

extern void __inline__ mp_enable_preemption_no_check(void)
{
#if	NCPUS > 1
	enable_preemption_no_check();
#endif	/* NCPUS > 1 */
}

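/* Return this CPU's count of held simple locks (%gs-relative load). */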
extern int __inline__ get_simple_lock_count(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->simple_lock_count;
	register int	pl;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

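/* Return this CPU's interrupt nesting level (%gs-relative load). */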
extern int __inline__ get_interrupt_level(void)
{
	register int	idx = (int)&((cpu_data_t *)0)->interrupt_level;
	register int	pl;

	__asm__ volatile ("	movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx));

	return (pl);
}

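/*
 * Illustrative only: a minimal sketch, under the assumption of a
 * hypothetical caller inside osfmk, of how the fast inlines above are
 * intended to be used together.  It is not part of the original header
 * and is disabled with #if 0.
 */
#if 0
static void
example_use_of_fast_inlines(void)
{
	register thread_t	self;

	disable_preemption();			/* bump %gs:preemption_level */
	self = current_thread();		/* fast %gs-relative fetch */
	assert(get_preemption_level() > 0);	/* level is now at least 1 */
	enable_preemption();			/* drop level; check for pending
						   preemption if it hits zero */
}
#endif
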
#if 0
#ifndef	__OPTIMIZE__
#undef	extern
#endif
#endif

#else	/* !defined(__GNUC__) */

#endif	/* defined(__GNUC__) */

#endif	/* I386_CPU_DATA */